sh_veu.c 31 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204
  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * sh-mobile VEU mem2mem driver
  4. *
  5. * Copyright (C) 2012 Renesas Electronics Corporation
  6. * Author: Guennadi Liakhovetski, <g.liakhovetski@gmx.de>
  7. * Copyright (C) 2008 Magnus Damm
  8. */
  9. #include <linux/err.h>
  10. #include <linux/fs.h>
  11. #include <linux/kernel.h>
  12. #include <linux/module.h>
  13. #include <linux/interrupt.h>
  14. #include <linux/io.h>
  15. #include <linux/platform_device.h>
  16. #include <linux/pm_runtime.h>
  17. #include <linux/slab.h>
  18. #include <linux/types.h>
  19. #include <linux/videodev2.h>
  20. #include <media/v4l2-dev.h>
  21. #include <media/v4l2-device.h>
  22. #include <media/v4l2-ioctl.h>
  23. #include <media/v4l2-mem2mem.h>
  24. #include <media/v4l2-image-sizes.h>
  25. #include <media/videobuf2-dma-contig.h>
/* VEU register map: byte offsets from the memory-mapped register base */
#define VEU_STR 0x00 /* start register */
#define VEU_SWR 0x10 /* src: line length */
#define VEU_SSR 0x14 /* src: image size */
#define VEU_SAYR 0x18 /* src: y/rgb plane address */
#define VEU_SACR 0x1c /* src: c plane address */
#define VEU_BSSR 0x20 /* bundle mode register */
#define VEU_EDWR 0x30 /* dst: line length */
#define VEU_DAYR 0x34 /* dst: y/rgb plane address */
#define VEU_DACR 0x38 /* dst: c plane address */
#define VEU_TRCR 0x50 /* transform control */
#define VEU_RFCR 0x54 /* resize scale */
#define VEU_RFSR 0x58 /* resize clip */
#define VEU_ENHR 0x5c /* enhance */
#define VEU_FMCR 0x70 /* filter mode */
#define VEU_VTCR 0x74 /* lowpass vertical */
#define VEU_HTCR 0x78 /* lowpass horizontal */
#define VEU_APCR 0x80 /* color match */
#define VEU_ECCR 0x84 /* color replace */
#define VEU_AFXR 0x90 /* fixed mode */
#define VEU_SWPR 0x94 /* swap */
#define VEU_EIER 0xa0 /* interrupt mask */
#define VEU_EVTR 0xa4 /* interrupt event */
#define VEU_STAR 0xb0 /* status */
#define VEU_BSRR 0xb4 /* reset */

/* 3x3 YUV <-> RGB color-space conversion matrix, plus offset and clip */
#define VEU_MCR00 0x200 /* color conversion matrix coefficient 00 */
#define VEU_MCR01 0x204 /* color conversion matrix coefficient 01 */
#define VEU_MCR02 0x208 /* color conversion matrix coefficient 02 */
#define VEU_MCR10 0x20c /* color conversion matrix coefficient 10 */
#define VEU_MCR11 0x210 /* color conversion matrix coefficient 11 */
#define VEU_MCR12 0x214 /* color conversion matrix coefficient 12 */
#define VEU_MCR20 0x218 /* color conversion matrix coefficient 20 */
#define VEU_MCR21 0x21c /* color conversion matrix coefficient 21 */
#define VEU_MCR22 0x220 /* color conversion matrix coefficient 22 */
#define VEU_COFFR 0x224 /* color conversion offset */
#define VEU_CBR 0x228 /* color conversion clip */

/*
 * 4092x4092 max size is the normal case. In some cases it can be reduced to
 * 2048x2048, in other cases it can be 4092x8188 or even 8188x8188.
 */
#define MAX_W 4092
#define MAX_H 4092
#define MIN_W 8
#define MIN_H 8
#define ALIGN_W 4

/* 3 buffers of 2048 x 1536 - 3 megapixels @ 16bpp */
#define VIDEO_MEM_LIMIT ALIGN(2048 * 1536 * 2 * 3, 1024 * 1024)

#define MEM2MEM_DEF_TRANSLEN 1
struct sh_veu_dev;

/* Per-open-file state: one struct per open() of the video node */
struct sh_veu_file {
	struct v4l2_fh fh;
	struct sh_veu_dev *veu_dev;
	bool cfg_needed;	/* set by s_fmt; consumed by streamon to reprogram hw */
};

/* Static description of one supported pixel format */
struct sh_veu_format {
	u32 fourcc;		/* V4L2_PIX_FMT_* identifier */
	unsigned int depth;	/* total bits per pixel, all planes */
	unsigned int ydepth;	/* bits per pixel in the Y/RGB plane only */
};

/* video data format: runtime per-direction (input or output) configuration */
struct sh_veu_vfmt {
	struct v4l2_rect frame;		/* active frame geometry */
	unsigned int bytesperline;	/* Y/RGB plane stride in bytes */
	unsigned int offset_y;		/* Y/RGB plane offset from buffer base */
	unsigned int offset_c;		/* C plane offset, 0 for packed RGB */
	const struct sh_veu_format *fmt;
};
/* Per-device driver state */
struct sh_veu_dev {
	struct v4l2_device v4l2_dev;
	struct video_device vdev;
	struct v4l2_m2m_dev *m2m_dev;
	struct device *dev;
	struct v4l2_m2m_ctx *m2m_ctx;	/* lazily created in sh_veu_context_init() */
	struct sh_veu_vfmt vfmt_out;	/* CAPTURE side (VEU output) */
	struct sh_veu_vfmt vfmt_in;	/* OUTPUT side (VEU input) */
	/* Only single user per direction so far */
	struct sh_veu_file *capture;	/* file that owns the capture queue */
	struct sh_veu_file *output;	/* file that owns the output queue */
	struct mutex fop_lock;		/* serialises file operations / vb2 queues */
	void __iomem *base;		/* mapped register window */
	spinlock_t lock;
	bool is_2h;			/* VEU2H variant: special upscale + CSC matrix */
	unsigned int xaction;		/* transactions completed in this run */
	bool aborting;			/* set by job_abort, polled from IRQ path */
};
/* Indices into the sh_veu_fmt[] table below */
enum sh_veu_fmt_idx {
	SH_VEU_FMT_NV12,
	SH_VEU_FMT_NV16,
	SH_VEU_FMT_NV24,
	SH_VEU_FMT_RGB332,
	SH_VEU_FMT_RGB444,
	SH_VEU_FMT_RGB565,
	SH_VEU_FMT_RGB666,
	SH_VEU_FMT_RGB24,
};

/* Defaults applied until user space sets a format: VGA NV12 in, VGA RGB565 out */
#define DEFAULT_IN_WIDTH VGA_WIDTH
#define DEFAULT_IN_HEIGHT VGA_HEIGHT
#define DEFAULT_IN_FMTIDX SH_VEU_FMT_NV12
#define DEFAULT_OUT_WIDTH VGA_WIDTH
#define DEFAULT_OUT_HEIGHT VGA_HEIGHT
#define DEFAULT_OUT_FMTIDX SH_VEU_FMT_RGB565
/*
 * Alignment: Y-plane should be 4-byte aligned for NV12 and NV16, and 8-byte
 * aligned for NV24.
 */
static const struct sh_veu_format sh_veu_fmt[] = {
	[SH_VEU_FMT_NV12]   = { .ydepth = 8, .depth = 12, .fourcc = V4L2_PIX_FMT_NV12 },
	[SH_VEU_FMT_NV16]   = { .ydepth = 8, .depth = 16, .fourcc = V4L2_PIX_FMT_NV16 },
	[SH_VEU_FMT_NV24]   = { .ydepth = 8, .depth = 24, .fourcc = V4L2_PIX_FMT_NV24 },
	[SH_VEU_FMT_RGB332] = { .ydepth = 8, .depth = 8, .fourcc = V4L2_PIX_FMT_RGB332 },
	[SH_VEU_FMT_RGB444] = { .ydepth = 16, .depth = 16, .fourcc = V4L2_PIX_FMT_RGB444 },
	[SH_VEU_FMT_RGB565] = { .ydepth = 16, .depth = 16, .fourcc = V4L2_PIX_FMT_RGB565 },
	[SH_VEU_FMT_RGB666] = { .ydepth = 32, .depth = 32, .fourcc = V4L2_PIX_FMT_BGR666 },
	[SH_VEU_FMT_RGB24]  = { .ydepth = 24, .depth = 24, .fourcc = V4L2_PIX_FMT_RGB24 },
};

/* Compound-literal default vfmt values; bytesperline covers the Y/RGB plane */
#define DEFAULT_IN_VFMT (struct sh_veu_vfmt){ \
	.frame = { \
		.width = VGA_WIDTH, \
		.height = VGA_HEIGHT, \
	}, \
	.bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_IN_FMTIDX].ydepth) >> 3, \
	.fmt = &sh_veu_fmt[DEFAULT_IN_FMTIDX], \
}

#define DEFAULT_OUT_VFMT (struct sh_veu_vfmt){ \
	.frame = { \
		.width = VGA_WIDTH, \
		.height = VGA_HEIGHT, \
	}, \
	.bytesperline = (VGA_WIDTH * sh_veu_fmt[DEFAULT_OUT_FMTIDX].ydepth) >> 3, \
	.fmt = &sh_veu_fmt[DEFAULT_OUT_FMTIDX], \
}
/*
 * TODO: add support for further output formats:
 * SH_VEU_FMT_NV12,
 * SH_VEU_FMT_NV16,
 * SH_VEU_FMT_NV24,
 * SH_VEU_FMT_RGB332,
 * SH_VEU_FMT_RGB444,
 * SH_VEU_FMT_RGB666,
 * SH_VEU_FMT_RGB24,
 */
/* Formats currently selectable on the CAPTURE (VEU output) side */
static const int sh_veu_fmt_out[] = {
	SH_VEU_FMT_RGB565,
};

/*
 * TODO: add support for further input formats:
 * SH_VEU_FMT_NV16,
 * SH_VEU_FMT_NV24,
 * SH_VEU_FMT_RGB565,
 * SH_VEU_FMT_RGB666,
 * SH_VEU_FMT_RGB24,
 */
/* Formats currently selectable on the OUTPUT (VEU input) side */
static const int sh_veu_fmt_in[] = {
	SH_VEU_FMT_NV12,
};
  181. static enum v4l2_colorspace sh_veu_4cc2cspace(u32 fourcc)
  182. {
  183. switch (fourcc) {
  184. default:
  185. BUG();
  186. case V4L2_PIX_FMT_NV12:
  187. case V4L2_PIX_FMT_NV16:
  188. case V4L2_PIX_FMT_NV24:
  189. return V4L2_COLORSPACE_SMPTE170M;
  190. case V4L2_PIX_FMT_RGB332:
  191. case V4L2_PIX_FMT_RGB444:
  192. case V4L2_PIX_FMT_RGB565:
  193. case V4L2_PIX_FMT_BGR666:
  194. case V4L2_PIX_FMT_RGB24:
  195. return V4L2_COLORSPACE_SRGB;
  196. }
  197. }
/* Read a 32-bit VEU register at byte offset @reg from the register base */
static u32 sh_veu_reg_read(struct sh_veu_dev *veu, unsigned int reg)
{
	return ioread32(veu->base + reg);
}

/* Write a 32-bit VEU register at byte offset @reg from the register base */
static void sh_veu_reg_write(struct sh_veu_dev *veu, unsigned int reg,
			     u32 value)
{
	iowrite32(value, veu->base + reg);
}
/* ========== mem2mem callbacks ========== */

/*
 * m2m .job_abort callback: only flags the abort request here; the flag
 * is acted upon later (per the comment below, from the interrupt handler,
 * which is outside this chunk).
 */
static void sh_veu_job_abort(void *priv)
{
	struct sh_veu_dev *veu = priv;

	/* Will cancel the transaction in the next interrupt handler */
	veu->aborting = true;
}
  214. static void sh_veu_process(struct sh_veu_dev *veu,
  215. struct vb2_buffer *src_buf,
  216. struct vb2_buffer *dst_buf)
  217. {
  218. dma_addr_t addr = vb2_dma_contig_plane_dma_addr(dst_buf, 0);
  219. sh_veu_reg_write(veu, VEU_DAYR, addr + veu->vfmt_out.offset_y);
  220. sh_veu_reg_write(veu, VEU_DACR, veu->vfmt_out.offset_c ?
  221. addr + veu->vfmt_out.offset_c : 0);
  222. dev_dbg(veu->dev, "%s(): dst base %lx, y: %x, c: %x\n", __func__,
  223. (unsigned long)addr,
  224. veu->vfmt_out.offset_y, veu->vfmt_out.offset_c);
  225. addr = vb2_dma_contig_plane_dma_addr(src_buf, 0);
  226. sh_veu_reg_write(veu, VEU_SAYR, addr + veu->vfmt_in.offset_y);
  227. sh_veu_reg_write(veu, VEU_SACR, veu->vfmt_in.offset_c ?
  228. addr + veu->vfmt_in.offset_c : 0);
  229. dev_dbg(veu->dev, "%s(): src base %lx, y: %x, c: %x\n", __func__,
  230. (unsigned long)addr,
  231. veu->vfmt_in.offset_y, veu->vfmt_in.offset_c);
  232. sh_veu_reg_write(veu, VEU_STR, 1);
  233. sh_veu_reg_write(veu, VEU_EIER, 1); /* enable interrupt in VEU */
  234. }
  235. /*
  236. * sh_veu_device_run() - prepares and starts the device
  237. *
  238. * This will be called by the framework when it decides to schedule a particular
  239. * instance.
  240. */
  241. static void sh_veu_device_run(void *priv)
  242. {
  243. struct sh_veu_dev *veu = priv;
  244. struct vb2_v4l2_buffer *src_buf, *dst_buf;
  245. src_buf = v4l2_m2m_next_src_buf(veu->m2m_ctx);
  246. dst_buf = v4l2_m2m_next_dst_buf(veu->m2m_ctx);
  247. if (src_buf && dst_buf)
  248. sh_veu_process(veu, &src_buf->vb2_buf, &dst_buf->vb2_buf);
  249. }
  250. /* ========== video ioctls ========== */
  251. static bool sh_veu_is_streamer(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
  252. enum v4l2_buf_type type)
  253. {
  254. return (type == V4L2_BUF_TYPE_VIDEO_CAPTURE &&
  255. veu_file == veu->capture) ||
  256. (type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
  257. veu_file == veu->output);
  258. }
  259. static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
  260. struct vb2_queue *dst_vq);
  261. /*
  262. * It is not unusual to have video nodes open()ed multiple times. While some
  263. * V4L2 operations are non-intrusive, like querying formats and various
  264. * parameters, others, like setting formats, starting and stopping streaming,
  265. * queuing and dequeuing buffers, directly affect hardware configuration and /
  266. * or execution. This function verifies availability of the requested interface
  267. * and, if available, reserves it for the requesting user.
  268. */
  269. static int sh_veu_stream_init(struct sh_veu_dev *veu, struct sh_veu_file *veu_file,
  270. enum v4l2_buf_type type)
  271. {
  272. struct sh_veu_file **stream;
  273. switch (type) {
  274. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  275. stream = &veu->capture;
  276. break;
  277. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  278. stream = &veu->output;
  279. break;
  280. default:
  281. return -EINVAL;
  282. }
  283. if (*stream == veu_file)
  284. return 0;
  285. if (*stream)
  286. return -EBUSY;
  287. *stream = veu_file;
  288. return 0;
  289. }
  290. static int sh_veu_context_init(struct sh_veu_dev *veu)
  291. {
  292. if (veu->m2m_ctx)
  293. return 0;
  294. veu->m2m_ctx = v4l2_m2m_ctx_init(veu->m2m_dev, veu,
  295. sh_veu_queue_init);
  296. return PTR_ERR_OR_ZERO(veu->m2m_ctx);
  297. }
/* VIDIOC_QUERYCAP: report static driver identification strings */
static int sh_veu_querycap(struct file *file, void *priv,
			   struct v4l2_capability *cap)
{
	strscpy(cap->driver, "sh-veu", sizeof(cap->driver));
	strscpy(cap->card, "sh-mobile VEU", sizeof(cap->card));
	strscpy(cap->bus_info, "platform:sh-veu", sizeof(cap->bus_info));
	return 0;
}
  306. static int sh_veu_enum_fmt(struct v4l2_fmtdesc *f, const int *fmt, int fmt_num)
  307. {
  308. if (f->index >= fmt_num)
  309. return -EINVAL;
  310. f->pixelformat = sh_veu_fmt[fmt[f->index]].fourcc;
  311. return 0;
  312. }
/* VIDIOC_ENUM_FMT on the CAPTURE queue: VEU output formats */
static int sh_veu_enum_fmt_vid_cap(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_out, ARRAY_SIZE(sh_veu_fmt_out));
}

/* VIDIOC_ENUM_FMT on the OUTPUT queue: VEU input formats */
static int sh_veu_enum_fmt_vid_out(struct file *file, void *priv,
				   struct v4l2_fmtdesc *f)
{
	return sh_veu_enum_fmt(f, sh_veu_fmt_in, ARRAY_SIZE(sh_veu_fmt_in));
}
  323. static struct sh_veu_vfmt *sh_veu_get_vfmt(struct sh_veu_dev *veu,
  324. enum v4l2_buf_type type)
  325. {
  326. switch (type) {
  327. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  328. return &veu->vfmt_out;
  329. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  330. return &veu->vfmt_in;
  331. default:
  332. return NULL;
  333. }
  334. }
  335. static int sh_veu_g_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
  336. {
  337. struct v4l2_pix_format *pix = &f->fmt.pix;
  338. struct sh_veu_dev *veu = veu_file->veu_dev;
  339. struct sh_veu_vfmt *vfmt;
  340. vfmt = sh_veu_get_vfmt(veu, f->type);
  341. pix->width = vfmt->frame.width;
  342. pix->height = vfmt->frame.height;
  343. pix->field = V4L2_FIELD_NONE;
  344. pix->pixelformat = vfmt->fmt->fourcc;
  345. pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
  346. pix->bytesperline = vfmt->bytesperline;
  347. pix->sizeimage = vfmt->bytesperline * pix->height *
  348. vfmt->fmt->depth / vfmt->fmt->ydepth;
  349. dev_dbg(veu->dev, "%s(): type: %d, size %u @ %ux%u, fmt %x\n", __func__,
  350. f->type, pix->sizeimage, pix->width, pix->height, pix->pixelformat);
  351. return 0;
  352. }
/* VIDIOC_G_FMT on the OUTPUT queue; priv is the struct sh_veu_file * */
static int sh_veu_g_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}

/* VIDIOC_G_FMT on the CAPTURE queue; priv is the struct sh_veu_file * */
static int sh_veu_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	return sh_veu_g_fmt(priv, f);
}
  363. static int sh_veu_try_fmt(struct v4l2_format *f, const struct sh_veu_format *fmt)
  364. {
  365. struct v4l2_pix_format *pix = &f->fmt.pix;
  366. unsigned int y_bytes_used;
  367. /*
  368. * V4L2 specification suggests, that the driver should correct the
  369. * format struct if any of the dimensions is unsupported
  370. */
  371. switch (pix->field) {
  372. default:
  373. case V4L2_FIELD_ANY:
  374. pix->field = V4L2_FIELD_NONE;
  375. /* fall through: continue handling V4L2_FIELD_NONE */
  376. case V4L2_FIELD_NONE:
  377. break;
  378. }
  379. v4l_bound_align_image(&pix->width, MIN_W, MAX_W, ALIGN_W,
  380. &pix->height, MIN_H, MAX_H, 0, 0);
  381. y_bytes_used = (pix->width * fmt->ydepth) >> 3;
  382. if (pix->bytesperline < y_bytes_used)
  383. pix->bytesperline = y_bytes_used;
  384. pix->sizeimage = pix->height * pix->bytesperline * fmt->depth / fmt->ydepth;
  385. pix->pixelformat = fmt->fourcc;
  386. pix->colorspace = sh_veu_4cc2cspace(pix->pixelformat);
  387. pr_debug("%s(): type: %d, size %u\n", __func__, f->type, pix->sizeimage);
  388. return 0;
  389. }
  390. static const struct sh_veu_format *sh_veu_find_fmt(const struct v4l2_format *f)
  391. {
  392. const int *fmt;
  393. int i, n, dflt;
  394. pr_debug("%s(%d;%d)\n", __func__, f->type, f->fmt.pix.field);
  395. switch (f->type) {
  396. case V4L2_BUF_TYPE_VIDEO_CAPTURE:
  397. fmt = sh_veu_fmt_out;
  398. n = ARRAY_SIZE(sh_veu_fmt_out);
  399. dflt = DEFAULT_OUT_FMTIDX;
  400. break;
  401. case V4L2_BUF_TYPE_VIDEO_OUTPUT:
  402. default:
  403. fmt = sh_veu_fmt_in;
  404. n = ARRAY_SIZE(sh_veu_fmt_in);
  405. dflt = DEFAULT_IN_FMTIDX;
  406. break;
  407. }
  408. for (i = 0; i < n; i++)
  409. if (sh_veu_fmt[fmt[i]].fourcc == f->fmt.pix.pixelformat)
  410. return &sh_veu_fmt[fmt[i]];
  411. return &sh_veu_fmt[dflt];
  412. }
  413. static int sh_veu_try_fmt_vid_cap(struct file *file, void *priv,
  414. struct v4l2_format *f)
  415. {
  416. const struct sh_veu_format *fmt;
  417. fmt = sh_veu_find_fmt(f);
  418. return sh_veu_try_fmt(f, fmt);
  419. }
  420. static int sh_veu_try_fmt_vid_out(struct file *file, void *priv,
  421. struct v4l2_format *f)
  422. {
  423. const struct sh_veu_format *fmt;
  424. fmt = sh_veu_find_fmt(f);
  425. return sh_veu_try_fmt(f, fmt);
  426. }
/*
 * Compute the Y and C plane offsets within the DMA buffer for @vfmt,
 * accounting for the crop position (frame.left/top).
 *
 * NOTE(review): the byte offset is computed from veu->vfmt_out's
 * bytesperline and depth even when @vfmt is the *input* format. This
 * matches the upstream driver, but looks asymmetric - confirm this is
 * intended (crop coordinates living in output space) before changing.
 */
static void sh_veu_colour_offset(struct sh_veu_dev *veu, struct sh_veu_vfmt *vfmt)
{
	/* dst_left and dst_top validity will be verified in CROP / COMPOSE */
	unsigned int left = vfmt->frame.left & ~0x03;	/* 4-pixel aligned */
	unsigned int top = vfmt->frame.top;
	dma_addr_t offset = (dma_addr_t)top * veu->vfmt_out.bytesperline +
			(((dma_addr_t)left * veu->vfmt_out.fmt->depth) >> 3);
	unsigned int y_line;

	vfmt->offset_y = offset;

	switch (vfmt->fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV24:
		/* semi-planar: C plane follows the 16-byte-aligned Y plane */
		y_line = ALIGN(vfmt->frame.width, 16);
		vfmt->offset_c = offset + y_line * vfmt->frame.height;
		break;
	case V4L2_PIX_FMT_RGB332:
	case V4L2_PIX_FMT_RGB444:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_BGR666:
	case V4L2_PIX_FMT_RGB24:
		/* packed RGB: no separate C plane */
		vfmt->offset_c = 0;
		break;
	default:
		BUG();
	}
}
/*
 * Common S_FMT helper: commit a (previously try_fmt-corrected) format to
 * the per-direction state. Fails with -EBUSY while the matching vb2
 * queue has buffers allocated, and creates the m2m context on first use.
 */
static int sh_veu_s_fmt(struct sh_veu_file *veu_file, struct v4l2_format *f)
{
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	struct sh_veu_vfmt *vfmt;
	struct vb2_queue *vq;
	int ret = sh_veu_context_init(veu);

	if (ret < 0)
		return ret;

	vq = v4l2_m2m_get_vq(veu->m2m_ctx, f->type);
	if (!vq)
		return -EINVAL;

	if (vb2_is_busy(vq)) {
		v4l2_err(&veu_file->veu_dev->v4l2_dev, "%s queue busy\n", __func__);
		return -EBUSY;
	}

	vfmt = sh_veu_get_vfmt(veu, f->type);
	/* called after try_fmt(), hence vfmt != NULL. Implicit BUG_ON() below */

	vfmt->fmt = sh_veu_find_fmt(f);
	/* vfmt->fmt != NULL following the same argument as above */
	vfmt->frame.width = pix->width;
	vfmt->frame.height = pix->height;
	vfmt->bytesperline = pix->bytesperline;

	sh_veu_colour_offset(veu, vfmt);

	/*
	 * We could also verify and require configuration only if any parameters
	 * actually have changed, but it is unlikely, that the user requests the
	 * same configuration several times without closing the device.
	 */
	veu_file->cfg_needed = true;

	dev_dbg(veu->dev,
		"Setting format for type %d, wxh: %dx%d, fmt: %x\n",
		f->type, pix->width, pix->height, vfmt->fmt->fourcc);

	return 0;
}
  489. static int sh_veu_s_fmt_vid_cap(struct file *file, void *priv,
  490. struct v4l2_format *f)
  491. {
  492. int ret = sh_veu_try_fmt_vid_cap(file, priv, f);
  493. if (ret)
  494. return ret;
  495. return sh_veu_s_fmt(priv, f);
  496. }
  497. static int sh_veu_s_fmt_vid_out(struct file *file, void *priv,
  498. struct v4l2_format *f)
  499. {
  500. int ret = sh_veu_try_fmt_vid_out(file, priv, f);
  501. if (ret)
  502. return ret;
  503. return sh_veu_s_fmt(priv, f);
  504. }
/*
 * VIDIOC_REQBUFS: create the m2m context if needed, claim the queue
 * direction for this file, then delegate to the m2m core.
 */
static int sh_veu_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *reqbufs)
{
	struct sh_veu_file *veu_file = priv;
	struct sh_veu_dev *veu = veu_file->veu_dev;
	int ret = sh_veu_context_init(veu);

	if (ret < 0)
		return ret;

	ret = sh_veu_stream_init(veu, veu_file, reqbufs->type);
	if (ret < 0)
		return ret;

	return v4l2_m2m_reqbufs(file, veu->m2m_ctx, reqbufs);
}
  518. static int sh_veu_querybuf(struct file *file, void *priv,
  519. struct v4l2_buffer *buf)
  520. {
  521. struct sh_veu_file *veu_file = priv;
  522. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
  523. return -EBUSY;
  524. return v4l2_m2m_querybuf(file, veu_file->veu_dev->m2m_ctx, buf);
  525. }
  526. static int sh_veu_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
  527. {
  528. struct sh_veu_file *veu_file = priv;
  529. dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
  530. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
  531. return -EBUSY;
  532. return v4l2_m2m_qbuf(file, veu_file->veu_dev->m2m_ctx, buf);
  533. }
  534. static int sh_veu_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
  535. {
  536. struct sh_veu_file *veu_file = priv;
  537. dev_dbg(veu_file->veu_dev->dev, "%s(%d)\n", __func__, buf->type);
  538. if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, buf->type))
  539. return -EBUSY;
  540. return v4l2_m2m_dqbuf(file, veu_file->veu_dev->m2m_ctx, buf);
  541. }
  542. static void sh_veu_calc_scale(struct sh_veu_dev *veu,
  543. int size_in, int size_out, int crop_out,
  544. u32 *mant, u32 *frac, u32 *rep)
  545. {
  546. u32 fixpoint;
  547. /* calculate FRAC and MANT */
  548. *rep = *mant = *frac = 0;
  549. if (size_in == size_out) {
  550. if (crop_out != size_out)
  551. *mant = 1; /* needed for cropping */
  552. return;
  553. }
  554. /* VEU2H special upscale */
  555. if (veu->is_2h && size_out > size_in) {
  556. u32 fixpoint = (4096 * size_in) / size_out;
  557. *mant = fixpoint / 4096;
  558. *frac = (fixpoint - (*mant * 4096)) & ~0x07;
  559. switch (*frac) {
  560. case 0x800:
  561. *rep = 1;
  562. break;
  563. case 0x400:
  564. *rep = 3;
  565. break;
  566. case 0x200:
  567. *rep = 7;
  568. break;
  569. }
  570. if (*rep)
  571. return;
  572. }
  573. fixpoint = (4096 * (size_in - 1)) / (size_out + 1);
  574. *mant = fixpoint / 4096;
  575. *frac = fixpoint - (*mant * 4096);
  576. if (*frac & 0x07) {
  577. /*
  578. * FIXME: do we really have to round down twice in the
  579. * up-scaling case?
  580. */
  581. *frac &= ~0x07;
  582. if (size_out > size_in)
  583. *frac -= 8; /* round down if scaling up */
  584. else
  585. *frac += 8; /* round up if scaling down */
  586. }
  587. }
/*
 * Program the vertical scale/clip: per the writes below, the vertical
 * factor lives in the upper 16 bits of VEU_RFCR and the vertical clip in
 * the upper 16 bits of VEU_RFSR. Returns the resulting source extent,
 * aligned to 4.
 */
static unsigned long sh_veu_scale_v(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff0000) |
		(((mant << 12) | frac) << 16);
	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff0000) |
		(((rep << 12) | crop_out) << 16);
	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}

/*
 * Program the horizontal scale/clip: lower 16 bits of VEU_RFCR / VEU_RFSR,
 * same layout as the vertical halves above. Returns the resulting source
 * extent, aligned to 4.
 */
static unsigned long sh_veu_scale_h(struct sh_veu_dev *veu,
				    int size_in, int size_out, int crop_out)
{
	u32 mant, frac, value, rep;

	sh_veu_calc_scale(veu, size_in, size_out, crop_out, &mant, &frac, &rep);

	/* set scale */
	value = (sh_veu_reg_read(veu, VEU_RFCR) & ~0xffff) |
		(mant << 12) | frac;
	sh_veu_reg_write(veu, VEU_RFCR, value);

	/* set clip */
	value = (sh_veu_reg_read(veu, VEU_RFSR) & ~0xffff) |
		(rep << 12) | crop_out;
	sh_veu_reg_write(veu, VEU_RFSR, value);

	return ALIGN((size_in * crop_out) / size_out, 4);
}
/*
 * Full hardware (re)configuration from the stored in/out formats: reset
 * the VEU, program scaling, strides, sizes and transform control, and on
 * the VEU2H variant also load the colour-conversion matrix.
 */
static void sh_veu_configure(struct sh_veu_dev *veu)
{
	u32 src_width, src_stride, src_height;
	u32 dst_width, dst_stride, dst_height;
	u32 real_w, real_h;

	/* reset VEU */
	sh_veu_reg_write(veu, VEU_BSRR, 0x100);

	src_width = veu->vfmt_in.frame.width;
	src_height = veu->vfmt_in.frame.height;
	/* source stride uses the same 16-pixel alignment as offset_c */
	src_stride = ALIGN(veu->vfmt_in.frame.width, 16);

	dst_width = real_w = veu->vfmt_out.frame.width;
	dst_height = real_h = veu->vfmt_out.frame.height;
	/* Datasheet is unclear - whether it's always number of bytes or not */
	dst_stride = veu->vfmt_out.bytesperline;

	/*
	 * So far real_w == dst_width && real_h == dst_height, but it wasn't
	 * necessarily the case in the original vidix driver, so, it may change
	 * here in the future too.
	 */
	src_width = sh_veu_scale_h(veu, src_width, real_w, dst_width);
	src_height = sh_veu_scale_v(veu, src_height, real_h, dst_height);

	sh_veu_reg_write(veu, VEU_SWR, src_stride);
	sh_veu_reg_write(veu, VEU_SSR, src_width | (src_height << 16));
	sh_veu_reg_write(veu, VEU_BSSR, 0); /* not using bundle mode */
	sh_veu_reg_write(veu, VEU_EDWR, dst_stride);
	sh_veu_reg_write(veu, VEU_DACR, 0); /* unused for RGB */

	/* byte/word swap setting - magic value, presumably from the datasheet */
	sh_veu_reg_write(veu, VEU_SWPR, 0x67);
	/* transform control - magic value, presumably from the datasheet */
	sh_veu_reg_write(veu, VEU_TRCR, (6 << 16) | (0 << 14) | 2 | 4);

	if (veu->is_2h) {
		/* VEU2H: load the YUV->RGB conversion matrix and offset */
		sh_veu_reg_write(veu, VEU_MCR00, 0x0cc5);
		sh_veu_reg_write(veu, VEU_MCR01, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR02, 0x0000);

		sh_veu_reg_write(veu, VEU_MCR10, 0x397f);
		sh_veu_reg_write(veu, VEU_MCR11, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR12, 0x3ccd);

		sh_veu_reg_write(veu, VEU_MCR20, 0x0000);
		sh_veu_reg_write(veu, VEU_MCR21, 0x0950);
		sh_veu_reg_write(veu, VEU_MCR22, 0x1023);

		sh_veu_reg_write(veu, VEU_COFFR, 0x00800010);
	}
}
/*
 * VIDIOC_STREAMON: reprogram the hardware when a format change is
 * pending (cfg_needed set by s_fmt), then delegate to the m2m core.
 * Only the owner of the queue direction may start streaming.
 */
static int sh_veu_streamon(struct file *file, void *priv,
			   enum v4l2_buf_type type)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
		return -EBUSY;

	if (veu_file->cfg_needed) {
		struct sh_veu_dev *veu = veu_file->veu_dev;

		veu_file->cfg_needed = false;
		sh_veu_configure(veu_file->veu_dev);
		veu->xaction = 0;
		veu->aborting = false;
	}

	return v4l2_m2m_streamon(file, veu_file->veu_dev->m2m_ctx, type);
}

/* VIDIOC_STREAMOFF: only the owner of the queue direction may stop */
static int sh_veu_streamoff(struct file *file, void *priv,
			    enum v4l2_buf_type type)
{
	struct sh_veu_file *veu_file = priv;

	if (!sh_veu_is_streamer(veu_file->veu_dev, veu_file, type))
		return -EBUSY;

	return v4l2_m2m_streamoff(file, veu_file->veu_dev->m2m_ctx, type);
}
/* V4L2 ioctl dispatch table wiring the handlers defined above */
static const struct v4l2_ioctl_ops sh_veu_ioctl_ops = {
	.vidioc_querycap	= sh_veu_querycap,
	.vidioc_enum_fmt_vid_cap	= sh_veu_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap	= sh_veu_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap	= sh_veu_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap	= sh_veu_s_fmt_vid_cap,
	.vidioc_enum_fmt_vid_out	= sh_veu_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out	= sh_veu_g_fmt_vid_out,
	.vidioc_try_fmt_vid_out	= sh_veu_try_fmt_vid_out,
	.vidioc_s_fmt_vid_out	= sh_veu_s_fmt_vid_out,
	.vidioc_reqbufs		= sh_veu_reqbufs,
	.vidioc_querybuf	= sh_veu_querybuf,
	.vidioc_qbuf		= sh_veu_qbuf,
	.vidioc_dqbuf		= sh_veu_dqbuf,
	.vidioc_streamon	= sh_veu_streamon,
	.vidioc_streamoff	= sh_veu_streamoff,
};
/* ========== Queue operations ========== */

/*
 * vb2 .queue_setup: negotiate buffer count and plane size. Requires at
 * least 2 buffers, caps total allocation at VIDEO_MEM_LIMIT, and when
 * the caller pre-set *nplanes only validates the requested size.
 */
static int sh_veu_queue_setup(struct vb2_queue *vq,
			      unsigned int *nbuffers, unsigned int *nplanes,
			      unsigned int sizes[], struct device *alloc_devs[])
{
	struct sh_veu_dev *veu = vb2_get_drv_priv(vq);
	struct sh_veu_vfmt *vfmt = sh_veu_get_vfmt(veu, vq->type);
	unsigned int count = *nbuffers;
	/* full image size: bytesperline is Y-plane-only, scale by depth/ydepth */
	unsigned int size = vfmt->bytesperline * vfmt->frame.height *
		vfmt->fmt->depth / vfmt->fmt->ydepth;

	if (count < 2)
		*nbuffers = count = 2;

	if (size * count > VIDEO_MEM_LIMIT) {
		count = VIDEO_MEM_LIMIT / size;
		*nbuffers = count;
	}

	if (*nplanes)
		return sizes[0] < size ? -EINVAL : 0;

	*nplanes = 1;
	sizes[0] = size;

	dev_dbg(veu->dev, "get %d buffer(s) of size %d each.\n", count, size);

	return 0;
}
  722. static int sh_veu_buf_prepare(struct vb2_buffer *vb)
  723. {
  724. struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
  725. struct sh_veu_vfmt *vfmt;
  726. unsigned int sizeimage;
  727. vfmt = sh_veu_get_vfmt(veu, vb->vb2_queue->type);
  728. sizeimage = vfmt->bytesperline * vfmt->frame.height *
  729. vfmt->fmt->depth / vfmt->fmt->ydepth;
  730. if (vb2_plane_size(vb, 0) < sizeimage) {
  731. dev_dbg(veu->dev, "%s data will not fit into plane (%lu < %u)\n",
  732. __func__, vb2_plane_size(vb, 0), sizeimage);
  733. return -EINVAL;
  734. }
  735. vb2_set_plane_payload(vb, 0, sizeimage);
  736. return 0;
  737. }
  738. static void sh_veu_buf_queue(struct vb2_buffer *vb)
  739. {
  740. struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
  741. struct sh_veu_dev *veu = vb2_get_drv_priv(vb->vb2_queue);
  742. dev_dbg(veu->dev, "%s(%d)\n", __func__, vb->type);
  743. v4l2_m2m_buf_queue(veu->m2m_ctx, vbuf);
  744. }
/*
 * videobuf2 queue callbacks, shared by the OUTPUT and CAPTURE queues.
 * The generic vb2_ops_wait_* helpers drop/retake the queue lock around
 * blocking waits.
 */
static const struct vb2_ops sh_veu_qops = {
	.queue_setup	= sh_veu_queue_setup,
	.buf_prepare	= sh_veu_buf_prepare,
	.buf_queue	= sh_veu_buf_queue,
	.wait_prepare	= vb2_ops_wait_prepare,
	.wait_finish	= vb2_ops_wait_finish,
};
  752. static int sh_veu_queue_init(void *priv, struct vb2_queue *src_vq,
  753. struct vb2_queue *dst_vq)
  754. {
  755. struct sh_veu_dev *veu = priv;
  756. int ret;
  757. memset(src_vq, 0, sizeof(*src_vq));
  758. src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
  759. src_vq->io_modes = VB2_MMAP | VB2_USERPTR;
  760. src_vq->drv_priv = veu;
  761. src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
  762. src_vq->ops = &sh_veu_qops;
  763. src_vq->mem_ops = &vb2_dma_contig_memops;
  764. src_vq->lock = &veu->fop_lock;
  765. src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
  766. src_vq->dev = veu->v4l2_dev.dev;
  767. ret = vb2_queue_init(src_vq);
  768. if (ret < 0)
  769. return ret;
  770. memset(dst_vq, 0, sizeof(*dst_vq));
  771. dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
  772. dst_vq->io_modes = VB2_MMAP | VB2_USERPTR;
  773. dst_vq->drv_priv = veu;
  774. dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
  775. dst_vq->ops = &sh_veu_qops;
  776. dst_vq->mem_ops = &vb2_dma_contig_memops;
  777. dst_vq->lock = &veu->fop_lock;
  778. dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
  779. dst_vq->dev = veu->v4l2_dev.dev;
  780. return vb2_queue_init(dst_vq);
  781. }
  782. /* ========== File operations ========== */
  783. static int sh_veu_open(struct file *file)
  784. {
  785. struct sh_veu_dev *veu = video_drvdata(file);
  786. struct sh_veu_file *veu_file;
  787. veu_file = kzalloc(sizeof(*veu_file), GFP_KERNEL);
  788. if (!veu_file)
  789. return -ENOMEM;
  790. v4l2_fh_init(&veu_file->fh, video_devdata(file));
  791. veu_file->veu_dev = veu;
  792. veu_file->cfg_needed = true;
  793. file->private_data = veu_file;
  794. pm_runtime_get_sync(veu->dev);
  795. v4l2_fh_add(&veu_file->fh);
  796. dev_dbg(veu->dev, "Created instance %p\n", veu_file);
  797. return 0;
  798. }
/*
 * Release a file handle.
 *
 * If this handle owns the capture and/or output side, release the
 * corresponding vb2 queue(s).  Once neither side has an owner the
 * shared mem2mem context is torn down as well.  Finally the runtime-PM
 * reference taken in sh_veu_open() is dropped and the handle freed.
 */
static int sh_veu_release(struct file *file)
{
	struct sh_veu_dev *veu = video_drvdata(file);
	struct sh_veu_file *veu_file = file->private_data;

	dev_dbg(veu->dev, "Releasing instance %p\n", veu_file);

	/* Release whichever queue(s) this handle had claimed. */
	if (veu_file == veu->capture) {
		veu->capture = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE));
	}

	if (veu_file == veu->output) {
		veu->output = NULL;
		vb2_queue_release(v4l2_m2m_get_vq(veu->m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT));
	}

	/* Last owner gone: drop the shared mem2mem context too. */
	if (!veu->output && !veu->capture && veu->m2m_ctx) {
		v4l2_m2m_ctx_release(veu->m2m_ctx);
		veu->m2m_ctx = NULL;
	}

	/* Balance the pm_runtime_get_sync() from open(). */
	pm_runtime_put(veu->dev);

	v4l2_fh_del(&veu_file->fh);
	v4l2_fh_exit(&veu_file->fh);

	kfree(veu_file);

	return 0;
}
  822. static __poll_t sh_veu_poll(struct file *file,
  823. struct poll_table_struct *wait)
  824. {
  825. struct sh_veu_file *veu_file = file->private_data;
  826. return v4l2_m2m_poll(file, veu_file->veu_dev->m2m_ctx, wait);
  827. }
  828. static int sh_veu_mmap(struct file *file, struct vm_area_struct *vma)
  829. {
  830. struct sh_veu_file *veu_file = file->private_data;
  831. return v4l2_m2m_mmap(file, veu_file->veu_dev->m2m_ctx, vma);
  832. }
/* V4L2 file operations; video_ioctl2 dispatches to sh_veu_ioctl_ops. */
static const struct v4l2_file_operations sh_veu_fops = {
	.owner		= THIS_MODULE,
	.open		= sh_veu_open,
	.release	= sh_veu_release,
	.poll		= sh_veu_poll,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= sh_veu_mmap,
};
/*
 * Template for the registered video device; copied into the per-device
 * struct in probe.  minor = -1 requests dynamic minor allocation.
 */
static const struct video_device sh_veu_videodev = {
	.name		= "sh-veu",
	.fops		= &sh_veu_fops,
	.ioctl_ops	= &sh_veu_ioctl_ops,
	.minor		= -1,
	.release	= video_device_release_empty,
	.vfl_dir	= VFL_DIR_M2M,
	.device_caps	= V4L2_CAP_VIDEO_M2M | V4L2_CAP_STREAMING,
};
/* mem2mem framework callbacks: start one conversion / abort a job. */
static const struct v4l2_m2m_ops sh_veu_m2m_ops = {
	.device_run	= sh_veu_device_run,
	.job_abort	= sh_veu_job_abort,
};
  854. static irqreturn_t sh_veu_bh(int irq, void *dev_id)
  855. {
  856. struct sh_veu_dev *veu = dev_id;
  857. if (veu->xaction == MEM2MEM_DEF_TRANSLEN || veu->aborting) {
  858. v4l2_m2m_job_finish(veu->m2m_dev, veu->m2m_ctx);
  859. veu->xaction = 0;
  860. } else {
  861. sh_veu_device_run(veu);
  862. }
  863. return IRQ_HANDLED;
  864. }
/*
 * Hard interrupt handler: acknowledge a completed conversion, copy the
 * timestamp/timecode from source to destination buffer, mark both
 * buffers done and wake the threaded handler (sh_veu_bh) to either run
 * the next frame or finish the job.
 */
static irqreturn_t sh_veu_isr(int irq, void *dev_id)
{
	struct sh_veu_dev *veu = dev_id;
	struct vb2_v4l2_buffer *dst;
	struct vb2_v4l2_buffer *src;
	u32 status = sh_veu_reg_read(veu, VEU_EVTR);

	/* bundle read mode not used */
	if (!(status & 1))
		return IRQ_NONE;

	/* disable interrupt in VEU */
	sh_veu_reg_write(veu, VEU_EIER, 0);
	/* halt operation */
	sh_veu_reg_write(veu, VEU_STR, 0);
	/* ack int, write 0 to clear bits */
	sh_veu_reg_write(veu, VEU_EVTR, status & ~1);

	/* conversion completed */
	dst = v4l2_m2m_dst_buf_remove(veu->m2m_ctx);
	src = v4l2_m2m_src_buf_remove(veu->m2m_ctx);
	if (!src || !dst)
		return IRQ_NONE;

	/* Propagate timing metadata from the source frame. */
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->flags |=
		src->flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
	dst->timecode = src->timecode;

	spin_lock(&veu->lock);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	spin_unlock(&veu->lock);

	/* Count this frame; sh_veu_bh() decides whether the batch is done. */
	veu->xaction++;

	return IRQ_WAKE_THREAD;
}
/*
 * Probe: map the VEU registers, install the (threaded) interrupt
 * handler, register the v4l2 and mem2mem devices and finally the video
 * node.  Error unwinding mirrors the init order via the two goto
 * labels.
 */
static int sh_veu_probe(struct platform_device *pdev)
{
	struct sh_veu_dev *veu;
	struct resource *reg_res;
	struct video_device *vdev;
	int irq, ret;

	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);

	if (!reg_res || irq <= 0) {
		dev_err(&pdev->dev, "Insufficient VEU platform information.\n");
		return -ENODEV;
	}

	veu = devm_kzalloc(&pdev->dev, sizeof(*veu), GFP_KERNEL);
	if (!veu)
		return -ENOMEM;

	/* Distinguish the VEU2H variant by its register window size. */
	veu->is_2h = resource_size(reg_res) == 0x22c;

	veu->base = devm_ioremap_resource(&pdev->dev, reg_res);
	if (IS_ERR(veu->base))
		return PTR_ERR(veu->base);

	ret = devm_request_threaded_irq(&pdev->dev, irq, sh_veu_isr, sh_veu_bh,
					0, "veu", veu);
	if (ret < 0)
		return ret;

	ret = v4l2_device_register(&pdev->dev, &veu->v4l2_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Error registering v4l2 device\n");
		return ret;
	}

	vdev = &veu->vdev;

	/* Copy the static template, then fill in per-device fields. */
	*vdev = sh_veu_videodev;
	vdev->v4l2_dev = &veu->v4l2_dev;
	spin_lock_init(&veu->lock);
	mutex_init(&veu->fop_lock);
	vdev->lock = &veu->fop_lock;

	video_set_drvdata(vdev, veu);

	veu->dev	= &pdev->dev;
	veu->vfmt_out	= DEFAULT_OUT_VFMT;
	veu->vfmt_in	= DEFAULT_IN_VFMT;

	veu->m2m_dev = v4l2_m2m_init(&sh_veu_m2m_ops);
	if (IS_ERR(veu->m2m_dev)) {
		ret = PTR_ERR(veu->m2m_dev);
		v4l2_err(&veu->v4l2_dev, "Failed to init mem2mem device: %d\n", ret);
		goto em2minit;
	}

	/* Briefly power up so the device is accessible during registration. */
	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	pm_runtime_suspend(&pdev->dev);
	if (ret < 0)
		goto evidreg;

	return ret;

evidreg:
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
em2minit:
	v4l2_device_unregister(&veu->v4l2_dev);
	return ret;
}
/*
 * Remove: tear down in reverse probe order.  platform_get_drvdata()
 * yields the v4l2_device set by v4l2_device_register(), from which the
 * driver context is recovered via container_of().
 */
static int sh_veu_remove(struct platform_device *pdev)
{
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct sh_veu_dev *veu = container_of(v4l2_dev,
					      struct sh_veu_dev, v4l2_dev);

	video_unregister_device(&veu->vdev);
	pm_runtime_disable(&pdev->dev);
	v4l2_m2m_release(veu->m2m_dev);
	v4l2_device_unregister(&veu->v4l2_dev);

	return 0;
}
/*
 * Platform driver; .probe is supplied by module_platform_driver_probe()
 * below, which also allows sh_veu_probe() to live in __init memory
 * (hence the __refdata annotation).
 */
static struct platform_driver __refdata sh_veu_pdrv = {
	.remove		= sh_veu_remove,
	.driver		= {
		.name	= "sh_veu",
	},
};

module_platform_driver_probe(sh_veu_pdrv, sh_veu_probe);

MODULE_DESCRIPTION("sh-mobile VEU mem2mem driver");
MODULE_AUTHOR("Guennadi Liakhovetski, <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");