rcar_fdp1.c 66 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
0552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269227022712272227322742275227622772278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455
  1. // SPDX-License-Identifier: GPL-2.0+
  2. /*
  3. * Renesas R-Car Fine Display Processor
  4. *
  5. * Video format converter and frame deinterlacer device.
  6. *
  7. * Author: Kieran Bingham, <kieran@bingham.xyz>
  8. * Copyright (c) 2016 Renesas Electronics Corporation.
  9. *
  10. * This code is developed and inspired from the vim2m, rcar_jpu,
  11. * m2m-deinterlace, and vsp1 drivers.
  12. */
  13. #include <linux/clk.h>
  14. #include <linux/delay.h>
  15. #include <linux/dma-mapping.h>
  16. #include <linux/fs.h>
  17. #include <linux/interrupt.h>
  18. #include <linux/module.h>
  19. #include <linux/of.h>
  20. #include <linux/of_device.h>
  21. #include <linux/platform_device.h>
  22. #include <linux/pm_runtime.h>
  23. #include <linux/sched.h>
  24. #include <linux/slab.h>
  25. #include <linux/timer.h>
  26. #include <media/rcar-fcp.h>
  27. #include <media/v4l2-ctrls.h>
  28. #include <media/v4l2-device.h>
  29. #include <media/v4l2-event.h>
  30. #include <media/v4l2-ioctl.h>
  31. #include <media/v4l2-mem2mem.h>
  32. #include <media/videobuf2-dma-contig.h>
  33. static unsigned int debug;
  34. module_param(debug, uint, 0644);
  35. MODULE_PARM_DESC(debug, "activate debug info");
  36. /* Minimum and maximum frame width/height */
  37. #define FDP1_MIN_W 80U
  38. #define FDP1_MIN_H 80U
  39. #define FDP1_MAX_W 3840U
  40. #define FDP1_MAX_H 2160U
  41. #define FDP1_MAX_PLANES 3U
  42. #define FDP1_MAX_STRIDE 8190U
  43. /* Flags that indicate a format can be used for capture/output */
  44. #define FDP1_CAPTURE BIT(0)
  45. #define FDP1_OUTPUT BIT(1)
  46. #define DRIVER_NAME "rcar_fdp1"
  47. /* Number of Job's to have available on the processing queue */
  48. #define FDP1_NUMBER_JOBS 8
  49. #define dprintk(fdp1, fmt, arg...) \
  50. v4l2_dbg(1, debug, &fdp1->v4l2_dev, "%s: " fmt, __func__, ## arg)
  51. /*
  52. * FDP1 registers and bits
  53. */
  54. /* FDP1 start register - Imm */
  55. #define FD1_CTL_CMD 0x0000
  56. #define FD1_CTL_CMD_STRCMD BIT(0)
  57. /* Sync generator register - Imm */
  58. #define FD1_CTL_SGCMD 0x0004
  59. #define FD1_CTL_SGCMD_SGEN BIT(0)
  60. /* Register set end register - Imm */
  61. #define FD1_CTL_REGEND 0x0008
  62. #define FD1_CTL_REGEND_REGEND BIT(0)
  63. /* Channel activation register - Vupdt */
  64. #define FD1_CTL_CHACT 0x000c
  65. #define FD1_CTL_CHACT_SMW BIT(9)
  66. #define FD1_CTL_CHACT_WR BIT(8)
  67. #define FD1_CTL_CHACT_SMR BIT(3)
  68. #define FD1_CTL_CHACT_RD2 BIT(2)
  69. #define FD1_CTL_CHACT_RD1 BIT(1)
  70. #define FD1_CTL_CHACT_RD0 BIT(0)
  71. /* Operation Mode Register - Vupdt */
  72. #define FD1_CTL_OPMODE 0x0010
  73. #define FD1_CTL_OPMODE_PRG BIT(4)
  74. #define FD1_CTL_OPMODE_VIMD_INTERRUPT (0 << 0)
  75. #define FD1_CTL_OPMODE_VIMD_BESTEFFORT (1 << 0)
  76. #define FD1_CTL_OPMODE_VIMD_NOINTERRUPT (2 << 0)
  77. #define FD1_CTL_VPERIOD 0x0014
  78. #define FD1_CTL_CLKCTRL 0x0018
  79. #define FD1_CTL_CLKCTRL_CSTP_N BIT(0)
  80. /* Software reset register */
  81. #define FD1_CTL_SRESET 0x001c
  82. #define FD1_CTL_SRESET_SRST BIT(0)
  83. /* Control status register (V-update-status) */
  84. #define FD1_CTL_STATUS 0x0024
  85. #define FD1_CTL_STATUS_VINT_CNT_MASK GENMASK(31, 16)
  86. #define FD1_CTL_STATUS_VINT_CNT_SHIFT 16
  87. #define FD1_CTL_STATUS_SGREGSET BIT(10)
  88. #define FD1_CTL_STATUS_SGVERR BIT(9)
  89. #define FD1_CTL_STATUS_SGFREND BIT(8)
  90. #define FD1_CTL_STATUS_BSY BIT(0)
  91. #define FD1_CTL_VCYCLE_STAT 0x0028
  92. /* Interrupt enable register */
  93. #define FD1_CTL_IRQENB 0x0038
  94. /* Interrupt status register */
  95. #define FD1_CTL_IRQSTA 0x003c
  96. /* Interrupt control register */
  97. #define FD1_CTL_IRQFSET 0x0040
  98. /* Common IRQ Bit settings */
  99. #define FD1_CTL_IRQ_VERE BIT(16)
  100. #define FD1_CTL_IRQ_VINTE BIT(4)
  101. #define FD1_CTL_IRQ_FREE BIT(0)
  102. #define FD1_CTL_IRQ_MASK (FD1_CTL_IRQ_VERE | \
  103. FD1_CTL_IRQ_VINTE | \
  104. FD1_CTL_IRQ_FREE)
  105. /* RPF */
  106. #define FD1_RPF_SIZE 0x0060
  107. #define FD1_RPF_SIZE_MASK GENMASK(12, 0)
  108. #define FD1_RPF_SIZE_H_SHIFT 16
  109. #define FD1_RPF_SIZE_V_SHIFT 0
  110. #define FD1_RPF_FORMAT 0x0064
  111. #define FD1_RPF_FORMAT_CIPM BIT(16)
  112. #define FD1_RPF_FORMAT_RSPYCS BIT(13)
  113. #define FD1_RPF_FORMAT_RSPUVS BIT(12)
  114. #define FD1_RPF_FORMAT_CF BIT(8)
  115. #define FD1_RPF_PSTRIDE 0x0068
  116. #define FD1_RPF_PSTRIDE_Y_SHIFT 16
  117. #define FD1_RPF_PSTRIDE_C_SHIFT 0
  118. /* RPF0 Source Component Y Address register */
  119. #define FD1_RPF0_ADDR_Y 0x006c
  120. /* RPF1 Current Picture Registers */
  121. #define FD1_RPF1_ADDR_Y 0x0078
  122. #define FD1_RPF1_ADDR_C0 0x007c
  123. #define FD1_RPF1_ADDR_C1 0x0080
  124. /* RPF2 next picture register */
  125. #define FD1_RPF2_ADDR_Y 0x0084
  126. #define FD1_RPF_SMSK_ADDR 0x0090
  127. #define FD1_RPF_SWAP 0x0094
  128. /* WPF */
  129. #define FD1_WPF_FORMAT 0x00c0
  130. #define FD1_WPF_FORMAT_PDV_SHIFT 24
  131. #define FD1_WPF_FORMAT_FCNL BIT(20)
  132. #define FD1_WPF_FORMAT_WSPYCS BIT(15)
  133. #define FD1_WPF_FORMAT_WSPUVS BIT(14)
  134. #define FD1_WPF_FORMAT_WRTM_601_16 (0 << 9)
  135. #define FD1_WPF_FORMAT_WRTM_601_0 (1 << 9)
  136. #define FD1_WPF_FORMAT_WRTM_709_16 (2 << 9)
  137. #define FD1_WPF_FORMAT_CSC BIT(8)
  138. #define FD1_WPF_RNDCTL 0x00c4
  139. #define FD1_WPF_RNDCTL_CBRM BIT(28)
  140. #define FD1_WPF_RNDCTL_CLMD_NOCLIP (0 << 12)
  141. #define FD1_WPF_RNDCTL_CLMD_CLIP_16_235 (1 << 12)
  142. #define FD1_WPF_RNDCTL_CLMD_CLIP_1_254 (2 << 12)
  143. #define FD1_WPF_PSTRIDE 0x00c8
  144. #define FD1_WPF_PSTRIDE_Y_SHIFT 16
  145. #define FD1_WPF_PSTRIDE_C_SHIFT 0
  146. /* WPF Destination picture */
  147. #define FD1_WPF_ADDR_Y 0x00cc
  148. #define FD1_WPF_ADDR_C0 0x00d0
  149. #define FD1_WPF_ADDR_C1 0x00d4
  150. #define FD1_WPF_SWAP 0x00d8
  151. #define FD1_WPF_SWAP_OSWAP_SHIFT 0
  152. #define FD1_WPF_SWAP_SSWAP_SHIFT 4
  153. /* WPF/RPF Common */
  154. #define FD1_RWPF_SWAP_BYTE BIT(0)
  155. #define FD1_RWPF_SWAP_WORD BIT(1)
  156. #define FD1_RWPF_SWAP_LWRD BIT(2)
  157. #define FD1_RWPF_SWAP_LLWD BIT(3)
  158. /* IPC */
  159. #define FD1_IPC_MODE 0x0100
  160. #define FD1_IPC_MODE_DLI BIT(8)
  161. #define FD1_IPC_MODE_DIM_ADAPT2D3D (0 << 0)
  162. #define FD1_IPC_MODE_DIM_FIXED2D (1 << 0)
  163. #define FD1_IPC_MODE_DIM_FIXED3D (2 << 0)
  164. #define FD1_IPC_MODE_DIM_PREVFIELD (3 << 0)
  165. #define FD1_IPC_MODE_DIM_NEXTFIELD (4 << 0)
  166. #define FD1_IPC_SMSK_THRESH 0x0104
  167. #define FD1_IPC_SMSK_THRESH_CONST 0x00010002
  168. #define FD1_IPC_COMB_DET 0x0108
  169. #define FD1_IPC_COMB_DET_CONST 0x00200040
  170. #define FD1_IPC_MOTDEC 0x010c
  171. #define FD1_IPC_MOTDEC_CONST 0x00008020
  172. /* DLI registers */
  173. #define FD1_IPC_DLI_BLEND 0x0120
  174. #define FD1_IPC_DLI_BLEND_CONST 0x0080ff02
  175. #define FD1_IPC_DLI_HGAIN 0x0124
  176. #define FD1_IPC_DLI_HGAIN_CONST 0x001000ff
  177. #define FD1_IPC_DLI_SPRS 0x0128
  178. #define FD1_IPC_DLI_SPRS_CONST 0x009004ff
  179. #define FD1_IPC_DLI_ANGLE 0x012c
  180. #define FD1_IPC_DLI_ANGLE_CONST 0x0004080c
  181. #define FD1_IPC_DLI_ISOPIX0 0x0130
  182. #define FD1_IPC_DLI_ISOPIX0_CONST 0xff10ff10
  183. #define FD1_IPC_DLI_ISOPIX1 0x0134
  184. #define FD1_IPC_DLI_ISOPIX1_CONST 0x0000ff10
  185. /* Sensor registers */
  186. #define FD1_IPC_SENSOR_TH0 0x0140
  187. #define FD1_IPC_SENSOR_TH0_CONST 0x20208080
  188. #define FD1_IPC_SENSOR_TH1 0x0144
  189. #define FD1_IPC_SENSOR_TH1_CONST 0
  190. #define FD1_IPC_SENSOR_CTL0 0x0170
  191. #define FD1_IPC_SENSOR_CTL0_CONST 0x00002201
  192. #define FD1_IPC_SENSOR_CTL1 0x0174
  193. #define FD1_IPC_SENSOR_CTL1_CONST 0
  194. #define FD1_IPC_SENSOR_CTL2 0x0178
  195. #define FD1_IPC_SENSOR_CTL2_X_SHIFT 16
  196. #define FD1_IPC_SENSOR_CTL2_Y_SHIFT 0
  197. #define FD1_IPC_SENSOR_CTL3 0x017c
  198. #define FD1_IPC_SENSOR_CTL3_0_SHIFT 16
  199. #define FD1_IPC_SENSOR_CTL3_1_SHIFT 0
  200. /* Line memory pixel number register */
  201. #define FD1_IPC_LMEM 0x01e0
  202. #define FD1_IPC_LMEM_LINEAR 1024
  203. #define FD1_IPC_LMEM_TILE 960
  204. /* Internal Data (HW Version) */
  205. #define FD1_IP_INTDATA 0x0800
  206. #define FD1_IP_H3_ES1 0x02010101
  207. #define FD1_IP_M3W 0x02010202
  208. #define FD1_IP_H3 0x02010203
  209. #define FD1_IP_M3N 0x02010204
  210. #define FD1_IP_E3 0x02010205
  211. /* LUTs */
  212. #define FD1_LUT_DIF_ADJ 0x1000
  213. #define FD1_LUT_SAD_ADJ 0x1400
  214. #define FD1_LUT_BLD_GAIN 0x1800
  215. #define FD1_LUT_DIF_GAIN 0x1c00
  216. #define FD1_LUT_MDET 0x2000
  217. /**
  218. * struct fdp1_fmt - The FDP1 internal format data
  219. * @fourcc: the fourcc code, to match the V4L2 API
  220. * @bpp: bits per pixel per plane
  221. * @num_planes: number of planes
  222. * @hsub: horizontal subsampling factor
  223. * @vsub: vertical subsampling factor
  224. * @fmt: 7-bit format code for the fdp1 hardware
  225. * @swap_yc: the Y and C components are swapped (Y comes before C)
  226. * @swap_uv: the U and V components are swapped (V comes before U)
  227. * @swap: swap register control
  228. * @types: types of queue this format is applicable to
  229. */
  230. struct fdp1_fmt {
  231. u32 fourcc;
  232. u8 bpp[3];
  233. u8 num_planes;
  234. u8 hsub;
  235. u8 vsub;
  236. u8 fmt;
  237. bool swap_yc;
  238. bool swap_uv;
  239. u8 swap;
  240. u8 types;
  241. };
  242. static const struct fdp1_fmt fdp1_formats[] = {
  243. /* RGB formats are only supported by the Write Pixel Formatter */
  244. { V4L2_PIX_FMT_RGB332, { 8, 0, 0 }, 1, 1, 1, 0x00, false, false,
  245. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  246. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  247. FDP1_CAPTURE },
  248. { V4L2_PIX_FMT_XRGB444, { 16, 0, 0 }, 1, 1, 1, 0x01, false, false,
  249. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  250. FD1_RWPF_SWAP_WORD,
  251. FDP1_CAPTURE },
  252. { V4L2_PIX_FMT_XRGB555, { 16, 0, 0 }, 1, 1, 1, 0x04, false, false,
  253. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  254. FD1_RWPF_SWAP_WORD,
  255. FDP1_CAPTURE },
  256. { V4L2_PIX_FMT_RGB565, { 16, 0, 0 }, 1, 1, 1, 0x06, false, false,
  257. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  258. FD1_RWPF_SWAP_WORD,
  259. FDP1_CAPTURE },
  260. { V4L2_PIX_FMT_ABGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  261. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
  262. FDP1_CAPTURE },
  263. { V4L2_PIX_FMT_XBGR32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  264. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD,
  265. FDP1_CAPTURE },
  266. { V4L2_PIX_FMT_ARGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  267. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  268. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  269. FDP1_CAPTURE },
  270. { V4L2_PIX_FMT_XRGB32, { 32, 0, 0 }, 1, 1, 1, 0x13, false, false,
  271. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  272. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  273. FDP1_CAPTURE },
  274. { V4L2_PIX_FMT_RGB24, { 24, 0, 0 }, 1, 1, 1, 0x15, false, false,
  275. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  276. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  277. FDP1_CAPTURE },
  278. { V4L2_PIX_FMT_BGR24, { 24, 0, 0 }, 1, 1, 1, 0x18, false, false,
  279. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  280. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  281. FDP1_CAPTURE },
  282. { V4L2_PIX_FMT_ARGB444, { 16, 0, 0 }, 1, 1, 1, 0x19, false, false,
  283. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  284. FD1_RWPF_SWAP_WORD,
  285. FDP1_CAPTURE },
  286. { V4L2_PIX_FMT_ARGB555, { 16, 0, 0 }, 1, 1, 1, 0x1b, false, false,
  287. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  288. FD1_RWPF_SWAP_WORD,
  289. FDP1_CAPTURE },
  290. /* YUV Formats are supported by Read and Write Pixel Formatters */
  291. { V4L2_PIX_FMT_NV16M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, false,
  292. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  293. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  294. FDP1_CAPTURE | FDP1_OUTPUT },
  295. { V4L2_PIX_FMT_NV61M, { 8, 16, 0 }, 2, 2, 1, 0x41, false, true,
  296. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  297. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  298. FDP1_CAPTURE | FDP1_OUTPUT },
  299. { V4L2_PIX_FMT_NV12M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, false,
  300. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  301. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  302. FDP1_CAPTURE | FDP1_OUTPUT },
  303. { V4L2_PIX_FMT_NV21M, { 8, 16, 0 }, 2, 2, 2, 0x42, false, true,
  304. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  305. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  306. FDP1_CAPTURE | FDP1_OUTPUT },
  307. { V4L2_PIX_FMT_UYVY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, false,
  308. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  309. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  310. FDP1_CAPTURE | FDP1_OUTPUT },
  311. { V4L2_PIX_FMT_VYUY, { 16, 0, 0 }, 1, 2, 1, 0x47, false, true,
  312. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  313. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  314. FDP1_CAPTURE | FDP1_OUTPUT },
  315. { V4L2_PIX_FMT_YUYV, { 16, 0, 0 }, 1, 2, 1, 0x47, true, false,
  316. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  317. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  318. FDP1_CAPTURE | FDP1_OUTPUT },
  319. { V4L2_PIX_FMT_YVYU, { 16, 0, 0 }, 1, 2, 1, 0x47, true, true,
  320. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  321. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  322. FDP1_CAPTURE | FDP1_OUTPUT },
  323. { V4L2_PIX_FMT_YUV444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, false,
  324. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  325. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  326. FDP1_CAPTURE | FDP1_OUTPUT },
  327. { V4L2_PIX_FMT_YVU444M, { 8, 8, 8 }, 3, 1, 1, 0x4a, false, true,
  328. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  329. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  330. FDP1_CAPTURE | FDP1_OUTPUT },
  331. { V4L2_PIX_FMT_YUV422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, false,
  332. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  333. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  334. FDP1_CAPTURE | FDP1_OUTPUT },
  335. { V4L2_PIX_FMT_YVU422M, { 8, 8, 8 }, 3, 2, 1, 0x4b, false, true,
  336. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  337. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  338. FDP1_CAPTURE | FDP1_OUTPUT },
  339. { V4L2_PIX_FMT_YUV420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, false,
  340. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  341. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  342. FDP1_CAPTURE | FDP1_OUTPUT },
  343. { V4L2_PIX_FMT_YVU420M, { 8, 8, 8 }, 3, 2, 2, 0x4c, false, true,
  344. FD1_RWPF_SWAP_LLWD | FD1_RWPF_SWAP_LWRD |
  345. FD1_RWPF_SWAP_WORD | FD1_RWPF_SWAP_BYTE,
  346. FDP1_CAPTURE | FDP1_OUTPUT },
  347. };
  348. static int fdp1_fmt_is_rgb(const struct fdp1_fmt *fmt)
  349. {
  350. return fmt->fmt <= 0x1b; /* Last RGB code */
  351. }
  352. /*
  353. * FDP1 Lookup tables range from 0...255 only
  354. *
  355. * Each table must be less than 256 entries, and all tables
  356. * are padded out to 256 entries by duplicating the last value.
  357. */
  358. static const u8 fdp1_diff_adj[] = {
  359. 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
  360. 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
  361. 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
  362. };
  363. static const u8 fdp1_sad_adj[] = {
  364. 0x00, 0x24, 0x43, 0x5e, 0x76, 0x8c, 0x9e, 0xaf,
  365. 0xbd, 0xc9, 0xd4, 0xdd, 0xe4, 0xea, 0xef, 0xf3,
  366. 0xf6, 0xf9, 0xfb, 0xfc, 0xfd, 0xfe, 0xfe, 0xff,
  367. };
  368. static const u8 fdp1_bld_gain[] = {
  369. 0x80,
  370. };
  371. static const u8 fdp1_dif_gain[] = {
  372. 0x80,
  373. };
  374. static const u8 fdp1_mdet[] = {
  375. 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
  376. 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
  377. 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
  378. 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
  379. 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
  380. 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
  381. 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
  382. 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
  383. 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47,
  384. 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
  385. 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57,
  386. 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f,
  387. 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67,
  388. 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f,
  389. 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77,
  390. 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f,
  391. 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
  392. 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f,
  393. 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97,
  394. 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f,
  395. 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
  396. 0xa8, 0xa9, 0xaa, 0xab, 0xac, 0xad, 0xae, 0xaf,
  397. 0xb0, 0xb1, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6, 0xb7,
  398. 0xb8, 0xb9, 0xba, 0xbb, 0xbc, 0xbd, 0xbe, 0xbf,
  399. 0xc0, 0xc1, 0xc2, 0xc3, 0xc4, 0xc5, 0xc6, 0xc7,
  400. 0xc8, 0xc9, 0xca, 0xcb, 0xcc, 0xcd, 0xce, 0xcf,
  401. 0xd0, 0xd1, 0xd2, 0xd3, 0xd4, 0xd5, 0xd6, 0xd7,
  402. 0xd8, 0xd9, 0xda, 0xdb, 0xdc, 0xdd, 0xde, 0xdf,
  403. 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7,
  404. 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef,
  405. 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7,
  406. 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xff
  407. };
  408. /* Per-queue, driver-specific private data */
  409. struct fdp1_q_data {
  410. const struct fdp1_fmt *fmt;
  411. struct v4l2_pix_format_mplane format;
  412. unsigned int vsize;
  413. unsigned int stride_y;
  414. unsigned int stride_c;
  415. };
  416. static const struct fdp1_fmt *fdp1_find_format(u32 pixelformat)
  417. {
  418. const struct fdp1_fmt *fmt;
  419. unsigned int i;
  420. for (i = 0; i < ARRAY_SIZE(fdp1_formats); i++) {
  421. fmt = &fdp1_formats[i];
  422. if (fmt->fourcc == pixelformat)
  423. return fmt;
  424. }
  425. return NULL;
  426. }
  427. enum fdp1_deint_mode {
  428. FDP1_PROGRESSIVE = 0, /* Must be zero when !deinterlacing */
  429. FDP1_ADAPT2D3D,
  430. FDP1_FIXED2D,
  431. FDP1_FIXED3D,
  432. FDP1_PREVFIELD,
  433. FDP1_NEXTFIELD,
  434. };
  435. #define FDP1_DEINT_MODE_USES_NEXT(mode) \
  436. (mode == FDP1_ADAPT2D3D || \
  437. mode == FDP1_FIXED3D || \
  438. mode == FDP1_NEXTFIELD)
  439. #define FDP1_DEINT_MODE_USES_PREV(mode) \
  440. (mode == FDP1_ADAPT2D3D || \
  441. mode == FDP1_FIXED3D || \
  442. mode == FDP1_PREVFIELD)
  443. /*
  444. * FDP1 operates on potentially 3 fields, which are tracked
  445. * from the VB buffers using this context structure.
  446. * Will always be a field or a full frame, never two fields.
  447. */
  448. struct fdp1_field_buffer {
  449. struct vb2_v4l2_buffer *vb;
  450. dma_addr_t addrs[3];
  451. /* Should be NONE:TOP:BOTTOM only */
  452. enum v4l2_field field;
  453. /* Flag to indicate this is the last field in the vb */
  454. bool last_field;
  455. /* Buffer queue lists */
  456. struct list_head list;
  457. };
  458. struct fdp1_buffer {
  459. struct v4l2_m2m_buffer m2m_buf;
  460. struct fdp1_field_buffer fields[2];
  461. unsigned int num_fields;
  462. };
  463. static inline struct fdp1_buffer *to_fdp1_buffer(struct vb2_v4l2_buffer *vb)
  464. {
  465. return container_of(vb, struct fdp1_buffer, m2m_buf.vb);
  466. }
  467. struct fdp1_job {
  468. struct fdp1_field_buffer *previous;
  469. struct fdp1_field_buffer *active;
  470. struct fdp1_field_buffer *next;
  471. struct fdp1_field_buffer *dst;
  472. /* A job can only be on one list at a time */
  473. struct list_head list;
  474. };
  475. struct fdp1_dev {
  476. struct v4l2_device v4l2_dev;
  477. struct video_device vfd;
  478. struct mutex dev_mutex;
  479. spinlock_t irqlock;
  480. spinlock_t device_process_lock;
  481. void __iomem *regs;
  482. unsigned int irq;
  483. struct device *dev;
  484. /* Job Queues */
  485. struct fdp1_job jobs[FDP1_NUMBER_JOBS];
  486. struct list_head free_job_list;
  487. struct list_head queued_job_list;
  488. struct list_head hw_job_list;
  489. unsigned int clk_rate;
  490. struct rcar_fcp_device *fcp;
  491. struct v4l2_m2m_dev *m2m_dev;
  492. };
  493. struct fdp1_ctx {
  494. struct v4l2_fh fh;
  495. struct fdp1_dev *fdp1;
  496. struct v4l2_ctrl_handler hdl;
  497. unsigned int sequence;
  498. /* Processed buffers in this transaction */
  499. u8 num_processed;
  500. /* Transaction length (i.e. how many buffers per transaction) */
  501. u32 translen;
  502. /* Abort requested by m2m */
  503. int aborting;
  504. /* Deinterlace processing mode */
  505. enum fdp1_deint_mode deint_mode;
  506. /*
  507. * Adaptive 2D/3D mode uses a shared mask
  508. * This is allocated at streamon, if the ADAPT2D3D mode
  509. * is requested
  510. */
  511. unsigned int smsk_size;
  512. dma_addr_t smsk_addr[2];
  513. void *smsk_cpu;
  514. /* Capture pipeline, can specify an alpha value
  515. * for supported formats. 0-255 only
  516. */
  517. unsigned char alpha;
  518. /* Source and destination queue data */
  519. struct fdp1_q_data out_q; /* HW Source */
  520. struct fdp1_q_data cap_q; /* HW Destination */
  521. /*
  522. * Field Queues
  523. * Interlaced fields are used on 3 occasions, and tracked in this list.
  524. *
  525. * V4L2 Buffers are tracked inside the fdp1_buffer
  526. * and released when the last 'field' completes
  527. */
  528. struct list_head fields_queue;
  529. unsigned int buffers_queued;
  530. /*
  531. * For de-interlacing we need to track our previous buffer
  532. * while preparing our job lists.
  533. */
  534. struct fdp1_field_buffer *previous;
  535. };
  536. static inline struct fdp1_ctx *fh_to_ctx(struct v4l2_fh *fh)
  537. {
  538. return container_of(fh, struct fdp1_ctx, fh);
  539. }
  540. static struct fdp1_q_data *get_q_data(struct fdp1_ctx *ctx,
  541. enum v4l2_buf_type type)
  542. {
  543. if (V4L2_TYPE_IS_OUTPUT(type))
  544. return &ctx->out_q;
  545. else
  546. return &ctx->cap_q;
  547. }
  548. /*
  549. * list_remove_job: Take the first item off the specified job list
  550. *
  551. * Returns: pointer to a job, or NULL if the list is empty.
  552. */
  553. static struct fdp1_job *list_remove_job(struct fdp1_dev *fdp1,
  554. struct list_head *list)
  555. {
  556. struct fdp1_job *job;
  557. unsigned long flags;
  558. spin_lock_irqsave(&fdp1->irqlock, flags);
  559. job = list_first_entry_or_null(list, struct fdp1_job, list);
  560. if (job)
  561. list_del(&job->list);
  562. spin_unlock_irqrestore(&fdp1->irqlock, flags);
  563. return job;
  564. }
  565. /*
  566. * list_add_job: Add a job to the specified job list
  567. *
  568. * Returns: void - always succeeds
  569. */
  570. static void list_add_job(struct fdp1_dev *fdp1,
  571. struct list_head *list,
  572. struct fdp1_job *job)
  573. {
  574. unsigned long flags;
  575. spin_lock_irqsave(&fdp1->irqlock, flags);
  576. list_add_tail(&job->list, list);
  577. spin_unlock_irqrestore(&fdp1->irqlock, flags);
  578. }
  579. static struct fdp1_job *fdp1_job_alloc(struct fdp1_dev *fdp1)
  580. {
  581. return list_remove_job(fdp1, &fdp1->free_job_list);
  582. }
  583. static void fdp1_job_free(struct fdp1_dev *fdp1, struct fdp1_job *job)
  584. {
  585. /* Ensure that all residue from previous jobs is gone */
  586. memset(job, 0, sizeof(struct fdp1_job));
  587. list_add_job(fdp1, &fdp1->free_job_list, job);
  588. }
  589. static void queue_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
  590. {
  591. list_add_job(fdp1, &fdp1->queued_job_list, job);
  592. }
  593. static struct fdp1_job *get_queued_job(struct fdp1_dev *fdp1)
  594. {
  595. return list_remove_job(fdp1, &fdp1->queued_job_list);
  596. }
  597. static void queue_hw_job(struct fdp1_dev *fdp1, struct fdp1_job *job)
  598. {
  599. list_add_job(fdp1, &fdp1->hw_job_list, job);
  600. }
  601. static struct fdp1_job *get_hw_queued_job(struct fdp1_dev *fdp1)
  602. {
  603. return list_remove_job(fdp1, &fdp1->hw_job_list);
  604. }
  605. /*
  606. * Buffer lists handling
  607. */
  608. static void fdp1_field_complete(struct fdp1_ctx *ctx,
  609. struct fdp1_field_buffer *fbuf)
  610. {
  611. /* job->previous may be on the first field */
  612. if (!fbuf)
  613. return;
  614. if (fbuf->last_field)
  615. v4l2_m2m_buf_done(fbuf->vb, VB2_BUF_STATE_DONE);
  616. }
  617. static void fdp1_queue_field(struct fdp1_ctx *ctx,
  618. struct fdp1_field_buffer *fbuf)
  619. {
  620. unsigned long flags;
  621. spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
  622. list_add_tail(&fbuf->list, &ctx->fields_queue);
  623. spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
  624. ctx->buffers_queued++;
  625. }
  626. static struct fdp1_field_buffer *fdp1_dequeue_field(struct fdp1_ctx *ctx)
  627. {
  628. struct fdp1_field_buffer *fbuf;
  629. unsigned long flags;
  630. ctx->buffers_queued--;
  631. spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
  632. fbuf = list_first_entry_or_null(&ctx->fields_queue,
  633. struct fdp1_field_buffer, list);
  634. if (fbuf)
  635. list_del(&fbuf->list);
  636. spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
  637. return fbuf;
  638. }
  639. /*
  640. * Return the next field in the queue - or NULL,
  641. * without removing the item from the list
  642. */
  643. static struct fdp1_field_buffer *fdp1_peek_queued_field(struct fdp1_ctx *ctx)
  644. {
  645. struct fdp1_field_buffer *fbuf;
  646. unsigned long flags;
  647. spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
  648. fbuf = list_first_entry_or_null(&ctx->fields_queue,
  649. struct fdp1_field_buffer, list);
  650. spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
  651. return fbuf;
  652. }
  653. static u32 fdp1_read(struct fdp1_dev *fdp1, unsigned int reg)
  654. {
  655. u32 value = ioread32(fdp1->regs + reg);
  656. if (debug >= 2)
  657. dprintk(fdp1, "Read 0x%08x from 0x%04x\n", value, reg);
  658. return value;
  659. }
  660. static void fdp1_write(struct fdp1_dev *fdp1, u32 val, unsigned int reg)
  661. {
  662. if (debug >= 2)
  663. dprintk(fdp1, "Write 0x%08x to 0x%04x\n", val, reg);
  664. iowrite32(val, fdp1->regs + reg);
  665. }
  666. /* IPC registers are to be programmed with constant values */
  667. static void fdp1_set_ipc_dli(struct fdp1_ctx *ctx)
  668. {
  669. struct fdp1_dev *fdp1 = ctx->fdp1;
  670. fdp1_write(fdp1, FD1_IPC_SMSK_THRESH_CONST, FD1_IPC_SMSK_THRESH);
  671. fdp1_write(fdp1, FD1_IPC_COMB_DET_CONST, FD1_IPC_COMB_DET);
  672. fdp1_write(fdp1, FD1_IPC_MOTDEC_CONST, FD1_IPC_MOTDEC);
  673. fdp1_write(fdp1, FD1_IPC_DLI_BLEND_CONST, FD1_IPC_DLI_BLEND);
  674. fdp1_write(fdp1, FD1_IPC_DLI_HGAIN_CONST, FD1_IPC_DLI_HGAIN);
  675. fdp1_write(fdp1, FD1_IPC_DLI_SPRS_CONST, FD1_IPC_DLI_SPRS);
  676. fdp1_write(fdp1, FD1_IPC_DLI_ANGLE_CONST, FD1_IPC_DLI_ANGLE);
  677. fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX0_CONST, FD1_IPC_DLI_ISOPIX0);
  678. fdp1_write(fdp1, FD1_IPC_DLI_ISOPIX1_CONST, FD1_IPC_DLI_ISOPIX1);
  679. }
  680. static void fdp1_set_ipc_sensor(struct fdp1_ctx *ctx)
  681. {
  682. struct fdp1_dev *fdp1 = ctx->fdp1;
  683. struct fdp1_q_data *src_q_data = &ctx->out_q;
  684. unsigned int x0, x1;
  685. unsigned int hsize = src_q_data->format.width;
  686. unsigned int vsize = src_q_data->format.height;
  687. x0 = hsize / 3;
  688. x1 = 2 * hsize / 3;
  689. fdp1_write(fdp1, FD1_IPC_SENSOR_TH0_CONST, FD1_IPC_SENSOR_TH0);
  690. fdp1_write(fdp1, FD1_IPC_SENSOR_TH1_CONST, FD1_IPC_SENSOR_TH1);
  691. fdp1_write(fdp1, FD1_IPC_SENSOR_CTL0_CONST, FD1_IPC_SENSOR_CTL0);
  692. fdp1_write(fdp1, FD1_IPC_SENSOR_CTL1_CONST, FD1_IPC_SENSOR_CTL1);
  693. fdp1_write(fdp1, ((hsize - 1) << FD1_IPC_SENSOR_CTL2_X_SHIFT) |
  694. ((vsize - 1) << FD1_IPC_SENSOR_CTL2_Y_SHIFT),
  695. FD1_IPC_SENSOR_CTL2);
  696. fdp1_write(fdp1, (x0 << FD1_IPC_SENSOR_CTL3_0_SHIFT) |
  697. (x1 << FD1_IPC_SENSOR_CTL3_1_SHIFT),
  698. FD1_IPC_SENSOR_CTL3);
  699. }
  700. /*
  701. * fdp1_write_lut: Write a padded LUT to the hw
  702. *
  703. * FDP1 uses constant data for de-interlacing processing,
  704. * with large tables. These hardware tables are all 256 bytes
  705. * long, however they often contain repeated data at the end.
  706. *
  707. * The last byte of the table is written to all remaining entries.
  708. */
  709. static void fdp1_write_lut(struct fdp1_dev *fdp1, const u8 *lut,
  710. unsigned int len, unsigned int base)
  711. {
  712. unsigned int i;
  713. u8 pad;
  714. /* Tables larger than the hw are clipped */
  715. len = min(len, 256u);
  716. for (i = 0; i < len; i++)
  717. fdp1_write(fdp1, lut[i], base + (i*4));
  718. /* Tables are padded with the last entry */
  719. pad = lut[i-1];
  720. for (; i < 256; i++)
  721. fdp1_write(fdp1, pad, base + (i*4));
  722. }
  723. static void fdp1_set_lut(struct fdp1_dev *fdp1)
  724. {
  725. fdp1_write_lut(fdp1, fdp1_diff_adj, ARRAY_SIZE(fdp1_diff_adj),
  726. FD1_LUT_DIF_ADJ);
  727. fdp1_write_lut(fdp1, fdp1_sad_adj, ARRAY_SIZE(fdp1_sad_adj),
  728. FD1_LUT_SAD_ADJ);
  729. fdp1_write_lut(fdp1, fdp1_bld_gain, ARRAY_SIZE(fdp1_bld_gain),
  730. FD1_LUT_BLD_GAIN);
  731. fdp1_write_lut(fdp1, fdp1_dif_gain, ARRAY_SIZE(fdp1_dif_gain),
  732. FD1_LUT_DIF_GAIN);
  733. fdp1_write_lut(fdp1, fdp1_mdet, ARRAY_SIZE(fdp1_mdet),
  734. FD1_LUT_MDET);
  735. }
  736. static void fdp1_configure_rpf(struct fdp1_ctx *ctx,
  737. struct fdp1_job *job)
  738. {
  739. struct fdp1_dev *fdp1 = ctx->fdp1;
  740. u32 picture_size;
  741. u32 pstride;
  742. u32 format;
  743. u32 smsk_addr;
  744. struct fdp1_q_data *q_data = &ctx->out_q;
  745. /* Picture size is common to Source and Destination frames */
  746. picture_size = (q_data->format.width << FD1_RPF_SIZE_H_SHIFT)
  747. | (q_data->vsize << FD1_RPF_SIZE_V_SHIFT);
  748. /* Strides */
  749. pstride = q_data->stride_y << FD1_RPF_PSTRIDE_Y_SHIFT;
  750. if (q_data->format.num_planes > 1)
  751. pstride |= q_data->stride_c << FD1_RPF_PSTRIDE_C_SHIFT;
  752. /* Format control */
  753. format = q_data->fmt->fmt;
  754. if (q_data->fmt->swap_yc)
  755. format |= FD1_RPF_FORMAT_RSPYCS;
  756. if (q_data->fmt->swap_uv)
  757. format |= FD1_RPF_FORMAT_RSPUVS;
  758. if (job->active->field == V4L2_FIELD_BOTTOM) {
  759. format |= FD1_RPF_FORMAT_CF; /* Set for Bottom field */
  760. smsk_addr = ctx->smsk_addr[0];
  761. } else {
  762. smsk_addr = ctx->smsk_addr[1];
  763. }
  764. /* Deint mode is non-zero when deinterlacing */
  765. if (ctx->deint_mode)
  766. format |= FD1_RPF_FORMAT_CIPM;
  767. fdp1_write(fdp1, format, FD1_RPF_FORMAT);
  768. fdp1_write(fdp1, q_data->fmt->swap, FD1_RPF_SWAP);
  769. fdp1_write(fdp1, picture_size, FD1_RPF_SIZE);
  770. fdp1_write(fdp1, pstride, FD1_RPF_PSTRIDE);
  771. fdp1_write(fdp1, smsk_addr, FD1_RPF_SMSK_ADDR);
  772. /* Previous Field Channel (CH0) */
  773. if (job->previous)
  774. fdp1_write(fdp1, job->previous->addrs[0], FD1_RPF0_ADDR_Y);
  775. /* Current Field Channel (CH1) */
  776. fdp1_write(fdp1, job->active->addrs[0], FD1_RPF1_ADDR_Y);
  777. fdp1_write(fdp1, job->active->addrs[1], FD1_RPF1_ADDR_C0);
  778. fdp1_write(fdp1, job->active->addrs[2], FD1_RPF1_ADDR_C1);
  779. /* Next Field Channel (CH2) */
  780. if (job->next)
  781. fdp1_write(fdp1, job->next->addrs[0], FD1_RPF2_ADDR_Y);
  782. }
  783. static void fdp1_configure_wpf(struct fdp1_ctx *ctx,
  784. struct fdp1_job *job)
  785. {
  786. struct fdp1_dev *fdp1 = ctx->fdp1;
  787. struct fdp1_q_data *src_q_data = &ctx->out_q;
  788. struct fdp1_q_data *q_data = &ctx->cap_q;
  789. u32 pstride;
  790. u32 format;
  791. u32 swap;
  792. u32 rndctl;
  793. pstride = q_data->format.plane_fmt[0].bytesperline
  794. << FD1_WPF_PSTRIDE_Y_SHIFT;
  795. if (q_data->format.num_planes > 1)
  796. pstride |= q_data->format.plane_fmt[1].bytesperline
  797. << FD1_WPF_PSTRIDE_C_SHIFT;
  798. format = q_data->fmt->fmt; /* Output Format Code */
  799. if (q_data->fmt->swap_yc)
  800. format |= FD1_WPF_FORMAT_WSPYCS;
  801. if (q_data->fmt->swap_uv)
  802. format |= FD1_WPF_FORMAT_WSPUVS;
  803. if (fdp1_fmt_is_rgb(q_data->fmt)) {
  804. /* Enable Colour Space conversion */
  805. format |= FD1_WPF_FORMAT_CSC;
  806. /* Set WRTM */
  807. if (src_q_data->format.ycbcr_enc == V4L2_YCBCR_ENC_709)
  808. format |= FD1_WPF_FORMAT_WRTM_709_16;
  809. else if (src_q_data->format.quantization ==
  810. V4L2_QUANTIZATION_FULL_RANGE)
  811. format |= FD1_WPF_FORMAT_WRTM_601_0;
  812. else
  813. format |= FD1_WPF_FORMAT_WRTM_601_16;
  814. }
  815. /* Set an alpha value into the Pad Value */
  816. format |= ctx->alpha << FD1_WPF_FORMAT_PDV_SHIFT;
  817. /* Determine picture rounding and clipping */
  818. rndctl = FD1_WPF_RNDCTL_CBRM; /* Rounding Off */
  819. rndctl |= FD1_WPF_RNDCTL_CLMD_NOCLIP;
  820. /* WPF Swap needs both ISWAP and OSWAP setting */
  821. swap = q_data->fmt->swap << FD1_WPF_SWAP_OSWAP_SHIFT;
  822. swap |= src_q_data->fmt->swap << FD1_WPF_SWAP_SSWAP_SHIFT;
  823. fdp1_write(fdp1, format, FD1_WPF_FORMAT);
  824. fdp1_write(fdp1, rndctl, FD1_WPF_RNDCTL);
  825. fdp1_write(fdp1, swap, FD1_WPF_SWAP);
  826. fdp1_write(fdp1, pstride, FD1_WPF_PSTRIDE);
  827. fdp1_write(fdp1, job->dst->addrs[0], FD1_WPF_ADDR_Y);
  828. fdp1_write(fdp1, job->dst->addrs[1], FD1_WPF_ADDR_C0);
  829. fdp1_write(fdp1, job->dst->addrs[2], FD1_WPF_ADDR_C1);
  830. }
  831. static void fdp1_configure_deint_mode(struct fdp1_ctx *ctx,
  832. struct fdp1_job *job)
  833. {
  834. struct fdp1_dev *fdp1 = ctx->fdp1;
  835. u32 opmode = FD1_CTL_OPMODE_VIMD_NOINTERRUPT;
  836. u32 ipcmode = FD1_IPC_MODE_DLI; /* Always set */
  837. u32 channels = FD1_CTL_CHACT_WR | FD1_CTL_CHACT_RD1; /* Always on */
  838. /* De-interlacing Mode */
  839. switch (ctx->deint_mode) {
  840. default:
  841. case FDP1_PROGRESSIVE:
  842. dprintk(fdp1, "Progressive Mode\n");
  843. opmode |= FD1_CTL_OPMODE_PRG;
  844. ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
  845. break;
  846. case FDP1_ADAPT2D3D:
  847. dprintk(fdp1, "Adapt2D3D Mode\n");
  848. if (ctx->sequence == 0 || ctx->aborting)
  849. ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
  850. else
  851. ipcmode |= FD1_IPC_MODE_DIM_ADAPT2D3D;
  852. if (ctx->sequence > 1) {
  853. channels |= FD1_CTL_CHACT_SMW;
  854. channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
  855. }
  856. if (ctx->sequence > 2)
  857. channels |= FD1_CTL_CHACT_SMR;
  858. break;
  859. case FDP1_FIXED3D:
  860. dprintk(fdp1, "Fixed 3D Mode\n");
  861. ipcmode |= FD1_IPC_MODE_DIM_FIXED3D;
  862. /* Except for first and last frame, enable all channels */
  863. if (!(ctx->sequence == 0 || ctx->aborting))
  864. channels |= FD1_CTL_CHACT_RD0 | FD1_CTL_CHACT_RD2;
  865. break;
  866. case FDP1_FIXED2D:
  867. dprintk(fdp1, "Fixed 2D Mode\n");
  868. ipcmode |= FD1_IPC_MODE_DIM_FIXED2D;
  869. /* No extra channels enabled */
  870. break;
  871. case FDP1_PREVFIELD:
  872. dprintk(fdp1, "Previous Field Mode\n");
  873. ipcmode |= FD1_IPC_MODE_DIM_PREVFIELD;
  874. channels |= FD1_CTL_CHACT_RD0; /* Previous */
  875. break;
  876. case FDP1_NEXTFIELD:
  877. dprintk(fdp1, "Next Field Mode\n");
  878. ipcmode |= FD1_IPC_MODE_DIM_NEXTFIELD;
  879. channels |= FD1_CTL_CHACT_RD2; /* Next */
  880. break;
  881. }
  882. fdp1_write(fdp1, channels, FD1_CTL_CHACT);
  883. fdp1_write(fdp1, opmode, FD1_CTL_OPMODE);
  884. fdp1_write(fdp1, ipcmode, FD1_IPC_MODE);
  885. }
  886. /*
  887. * fdp1_device_process() - Run the hardware
  888. *
  889. * Configure and start the hardware to generate a single frame
  890. * of output given our input parameters.
  891. */
  892. static int fdp1_device_process(struct fdp1_ctx *ctx)
  893. {
  894. struct fdp1_dev *fdp1 = ctx->fdp1;
  895. struct fdp1_job *job;
  896. unsigned long flags;
  897. spin_lock_irqsave(&fdp1->device_process_lock, flags);
  898. /* Get a job to process */
  899. job = get_queued_job(fdp1);
  900. if (!job) {
  901. /*
  902. * VINT can call us to see if we can queue another job.
  903. * If we have no work to do, we simply return.
  904. */
  905. spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
  906. return 0;
  907. }
  908. /* First Frame only? ... */
  909. fdp1_write(fdp1, FD1_CTL_CLKCTRL_CSTP_N, FD1_CTL_CLKCTRL);
  910. /* Set the mode, and configuration */
  911. fdp1_configure_deint_mode(ctx, job);
  912. /* DLI Static Configuration */
  913. fdp1_set_ipc_dli(ctx);
  914. /* Sensor Configuration */
  915. fdp1_set_ipc_sensor(ctx);
  916. /* Setup the source picture */
  917. fdp1_configure_rpf(ctx, job);
  918. /* Setup the destination picture */
  919. fdp1_configure_wpf(ctx, job);
  920. /* Line Memory Pixel Number Register for linear access */
  921. fdp1_write(fdp1, FD1_IPC_LMEM_LINEAR, FD1_IPC_LMEM);
  922. /* Enable Interrupts */
  923. fdp1_write(fdp1, FD1_CTL_IRQ_MASK, FD1_CTL_IRQENB);
  924. /* Finally, the Immediate Registers */
  925. /* This job is now in the HW queue */
  926. queue_hw_job(fdp1, job);
  927. /* Start the command */
  928. fdp1_write(fdp1, FD1_CTL_CMD_STRCMD, FD1_CTL_CMD);
  929. /* Registers will update to HW at next VINT */
  930. fdp1_write(fdp1, FD1_CTL_REGEND_REGEND, FD1_CTL_REGEND);
  931. /* Enable VINT Generator */
  932. fdp1_write(fdp1, FD1_CTL_SGCMD_SGEN, FD1_CTL_SGCMD);
  933. spin_unlock_irqrestore(&fdp1->device_process_lock, flags);
  934. return 0;
  935. }
  936. /*
  937. * mem2mem callbacks
  938. */
  939. /*
  940. * job_ready() - check whether an instance is ready to be scheduled to run
  941. */
  942. static int fdp1_m2m_job_ready(void *priv)
  943. {
  944. struct fdp1_ctx *ctx = priv;
  945. struct fdp1_q_data *src_q_data = &ctx->out_q;
  946. int srcbufs = 1;
  947. int dstbufs = 1;
  948. dprintk(ctx->fdp1, "+ Src: %d : Dst: %d\n",
  949. v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx),
  950. v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx));
  951. /* One output buffer is required for each field */
  952. if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
  953. dstbufs = 2;
  954. if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < srcbufs
  955. || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < dstbufs) {
  956. dprintk(ctx->fdp1, "Not enough buffers available\n");
  957. return 0;
  958. }
  959. return 1;
  960. }
  961. static void fdp1_m2m_job_abort(void *priv)
  962. {
  963. struct fdp1_ctx *ctx = priv;
  964. dprintk(ctx->fdp1, "+\n");
  965. /* Will cancel the transaction in the next interrupt handler */
  966. ctx->aborting = 1;
  967. /* Immediate abort sequence */
  968. fdp1_write(ctx->fdp1, 0, FD1_CTL_SGCMD);
  969. fdp1_write(ctx->fdp1, FD1_CTL_SRESET_SRST, FD1_CTL_SRESET);
  970. }
  971. /*
  972. * fdp1_prepare_job: Prepare and queue a new job for a single action of work
  973. *
  974. * Prepare the next field, (or frame in progressive) and an output
  975. * buffer for the hardware to perform a single operation.
  976. */
  977. static struct fdp1_job *fdp1_prepare_job(struct fdp1_ctx *ctx)
  978. {
  979. struct vb2_v4l2_buffer *vbuf;
  980. struct fdp1_buffer *fbuf;
  981. struct fdp1_dev *fdp1 = ctx->fdp1;
  982. struct fdp1_job *job;
  983. unsigned int buffers_required = 1;
  984. dprintk(fdp1, "+\n");
  985. if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode))
  986. buffers_required = 2;
  987. if (ctx->buffers_queued < buffers_required)
  988. return NULL;
  989. job = fdp1_job_alloc(fdp1);
  990. if (!job) {
  991. dprintk(fdp1, "No free jobs currently available\n");
  992. return NULL;
  993. }
  994. job->active = fdp1_dequeue_field(ctx);
  995. if (!job->active) {
  996. /* Buffer check should prevent this ever happening */
  997. dprintk(fdp1, "No input buffers currently available\n");
  998. fdp1_job_free(fdp1, job);
  999. return NULL;
  1000. }
  1001. dprintk(fdp1, "+ Buffer en-route...\n");
  1002. /* Source buffers have been prepared on our buffer_queue
  1003. * Prepare our Output buffer
  1004. */
  1005. vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
  1006. fbuf = to_fdp1_buffer(vbuf);
  1007. job->dst = &fbuf->fields[0];
  1008. job->active->vb->sequence = ctx->sequence;
  1009. job->dst->vb->sequence = ctx->sequence;
  1010. ctx->sequence++;
  1011. if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode)) {
  1012. job->previous = ctx->previous;
  1013. /* Active buffer becomes the next job's previous buffer */
  1014. ctx->previous = job->active;
  1015. }
  1016. if (FDP1_DEINT_MODE_USES_NEXT(ctx->deint_mode)) {
  1017. /* Must be called after 'active' is dequeued */
  1018. job->next = fdp1_peek_queued_field(ctx);
  1019. }
  1020. /* Transfer timestamps and flags from src->dst */
  1021. job->dst->vb->vb2_buf.timestamp = job->active->vb->vb2_buf.timestamp;
  1022. job->dst->vb->flags = job->active->vb->flags &
  1023. V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
  1024. /* Ideally, the frame-end function will just 'check' to see
  1025. * if there are more jobs instead
  1026. */
  1027. ctx->translen++;
  1028. /* Finally, Put this job on the processing queue */
  1029. queue_job(fdp1, job);
  1030. dprintk(fdp1, "Job Queued translen = %d\n", ctx->translen);
  1031. return job;
  1032. }
  1033. /* fdp1_m2m_device_run() - prepares and starts the device for an M2M task
  1034. *
  1035. * A single input buffer is taken and serialised into our fdp1_buffer
  1036. * queue. The queue is then processed to create as many jobs as possible
  1037. * from our available input.
  1038. */
  1039. static void fdp1_m2m_device_run(void *priv)
  1040. {
  1041. struct fdp1_ctx *ctx = priv;
  1042. struct fdp1_dev *fdp1 = ctx->fdp1;
  1043. struct vb2_v4l2_buffer *src_vb;
  1044. struct fdp1_buffer *buf;
  1045. unsigned int i;
  1046. dprintk(fdp1, "+\n");
  1047. ctx->translen = 0;
  1048. /* Get our incoming buffer of either one or two fields, or one frame */
  1049. src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
  1050. buf = to_fdp1_buffer(src_vb);
  1051. for (i = 0; i < buf->num_fields; i++) {
  1052. struct fdp1_field_buffer *fbuf = &buf->fields[i];
  1053. fdp1_queue_field(ctx, fbuf);
  1054. dprintk(fdp1, "Queued Buffer [%d] last_field:%d\n",
  1055. i, fbuf->last_field);
  1056. }
  1057. /* Queue as many jobs as our data provides for */
  1058. while (fdp1_prepare_job(ctx))
  1059. ;
  1060. if (ctx->translen == 0) {
  1061. dprintk(fdp1, "No jobs were processed. M2M action complete\n");
  1062. v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
  1063. return;
  1064. }
  1065. /* Kick the job processing action */
  1066. fdp1_device_process(ctx);
  1067. }
  1068. /*
  1069. * device_frame_end:
  1070. *
  1071. * Handles the M2M level after a buffer completion event.
  1072. */
  1073. static void device_frame_end(struct fdp1_dev *fdp1,
  1074. enum vb2_buffer_state state)
  1075. {
  1076. struct fdp1_ctx *ctx;
  1077. unsigned long flags;
  1078. struct fdp1_job *job = get_hw_queued_job(fdp1);
  1079. dprintk(fdp1, "+\n");
  1080. ctx = v4l2_m2m_get_curr_priv(fdp1->m2m_dev);
  1081. if (ctx == NULL) {
  1082. v4l2_err(&fdp1->v4l2_dev,
  1083. "Instance released before the end of transaction\n");
  1084. return;
  1085. }
  1086. ctx->num_processed++;
  1087. /*
  1088. * fdp1_field_complete will call buf_done only when the last vb2_buffer
  1089. * reference is complete
  1090. */
  1091. if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
  1092. fdp1_field_complete(ctx, job->previous);
  1093. else
  1094. fdp1_field_complete(ctx, job->active);
  1095. spin_lock_irqsave(&fdp1->irqlock, flags);
  1096. v4l2_m2m_buf_done(job->dst->vb, state);
  1097. job->dst = NULL;
  1098. spin_unlock_irqrestore(&fdp1->irqlock, flags);
  1099. /* Move this job back to the free job list */
  1100. fdp1_job_free(fdp1, job);
  1101. dprintk(fdp1, "curr_ctx->num_processed %d curr_ctx->translen %d\n",
  1102. ctx->num_processed, ctx->translen);
  1103. if (ctx->num_processed == ctx->translen ||
  1104. ctx->aborting) {
  1105. dprintk(ctx->fdp1, "Finishing transaction\n");
  1106. ctx->num_processed = 0;
  1107. v4l2_m2m_job_finish(fdp1->m2m_dev, ctx->fh.m2m_ctx);
  1108. } else {
  1109. /*
  1110. * For pipelined performance support, this would
  1111. * be called from a VINT handler
  1112. */
  1113. fdp1_device_process(ctx);
  1114. }
  1115. }
  1116. /*
  1117. * video ioctls
  1118. */
  1119. static int fdp1_vidioc_querycap(struct file *file, void *priv,
  1120. struct v4l2_capability *cap)
  1121. {
  1122. strscpy(cap->driver, DRIVER_NAME, sizeof(cap->driver));
  1123. strscpy(cap->card, DRIVER_NAME, sizeof(cap->card));
  1124. snprintf(cap->bus_info, sizeof(cap->bus_info),
  1125. "platform:%s", DRIVER_NAME);
  1126. return 0;
  1127. }
  1128. static int fdp1_enum_fmt(struct v4l2_fmtdesc *f, u32 type)
  1129. {
  1130. unsigned int i, num;
  1131. num = 0;
  1132. for (i = 0; i < ARRAY_SIZE(fdp1_formats); ++i) {
  1133. if (fdp1_formats[i].types & type) {
  1134. if (num == f->index)
  1135. break;
  1136. ++num;
  1137. }
  1138. }
  1139. /* Format not found */
  1140. if (i >= ARRAY_SIZE(fdp1_formats))
  1141. return -EINVAL;
  1142. /* Format found */
  1143. f->pixelformat = fdp1_formats[i].fourcc;
  1144. return 0;
  1145. }
  1146. static int fdp1_enum_fmt_vid_cap(struct file *file, void *priv,
  1147. struct v4l2_fmtdesc *f)
  1148. {
  1149. return fdp1_enum_fmt(f, FDP1_CAPTURE);
  1150. }
  1151. static int fdp1_enum_fmt_vid_out(struct file *file, void *priv,
  1152. struct v4l2_fmtdesc *f)
  1153. {
  1154. return fdp1_enum_fmt(f, FDP1_OUTPUT);
  1155. }
  1156. static int fdp1_g_fmt(struct file *file, void *priv, struct v4l2_format *f)
  1157. {
  1158. struct fdp1_q_data *q_data;
  1159. struct fdp1_ctx *ctx = fh_to_ctx(priv);
  1160. if (!v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type))
  1161. return -EINVAL;
  1162. q_data = get_q_data(ctx, f->type);
  1163. f->fmt.pix_mp = q_data->format;
  1164. return 0;
  1165. }
  1166. static void fdp1_compute_stride(struct v4l2_pix_format_mplane *pix,
  1167. const struct fdp1_fmt *fmt)
  1168. {
  1169. unsigned int i;
  1170. /* Compute and clamp the stride and image size. */
  1171. for (i = 0; i < min_t(unsigned int, fmt->num_planes, 2U); ++i) {
  1172. unsigned int hsub = i > 0 ? fmt->hsub : 1;
  1173. unsigned int vsub = i > 0 ? fmt->vsub : 1;
  1174. /* From VSP : TODO: Confirm alignment limits for FDP1 */
  1175. unsigned int align = 128;
  1176. unsigned int bpl;
  1177. bpl = clamp_t(unsigned int, pix->plane_fmt[i].bytesperline,
  1178. pix->width / hsub * fmt->bpp[i] / 8,
  1179. round_down(FDP1_MAX_STRIDE, align));
  1180. pix->plane_fmt[i].bytesperline = round_up(bpl, align);
  1181. pix->plane_fmt[i].sizeimage = pix->plane_fmt[i].bytesperline
  1182. * pix->height / vsub;
  1183. memset(pix->plane_fmt[i].reserved, 0,
  1184. sizeof(pix->plane_fmt[i].reserved));
  1185. }
  1186. if (fmt->num_planes == 3) {
  1187. /* The two chroma planes must have the same stride. */
  1188. pix->plane_fmt[2].bytesperline = pix->plane_fmt[1].bytesperline;
  1189. pix->plane_fmt[2].sizeimage = pix->plane_fmt[1].sizeimage;
  1190. memset(pix->plane_fmt[2].reserved, 0,
  1191. sizeof(pix->plane_fmt[2].reserved));
  1192. }
  1193. }
  1194. static void fdp1_try_fmt_output(struct fdp1_ctx *ctx,
  1195. const struct fdp1_fmt **fmtinfo,
  1196. struct v4l2_pix_format_mplane *pix)
  1197. {
  1198. const struct fdp1_fmt *fmt;
  1199. unsigned int width;
  1200. unsigned int height;
  1201. /* Validate the pixel format to ensure the output queue supports it. */
  1202. fmt = fdp1_find_format(pix->pixelformat);
  1203. if (!fmt || !(fmt->types & FDP1_OUTPUT))
  1204. fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);
  1205. if (fmtinfo)
  1206. *fmtinfo = fmt;
  1207. pix->pixelformat = fmt->fourcc;
  1208. pix->num_planes = fmt->num_planes;
  1209. /*
  1210. * Progressive video and all interlaced field orders are acceptable.
  1211. * Default to V4L2_FIELD_INTERLACED.
  1212. */
  1213. if (pix->field != V4L2_FIELD_NONE &&
  1214. pix->field != V4L2_FIELD_ALTERNATE &&
  1215. !V4L2_FIELD_HAS_BOTH(pix->field))
  1216. pix->field = V4L2_FIELD_INTERLACED;

        /*
         * The deinterlacer doesn't care about the colorspace; accept all
         * values and default to V4L2_COLORSPACE_SMPTE170M. The YUV to RGB
         * conversion at the output of the deinterlacer supports a subset of
         * encodings and quantization methods and will only be available when
         * the colorspace allows it.
         */
        if (pix->colorspace == V4L2_COLORSPACE_DEFAULT)
                pix->colorspace = V4L2_COLORSPACE_SMPTE170M;

        /*
         * Align the width and height for YUV 4:2:2 and 4:2:0 formats and clamp
         * them to the supported frame size range. The height boundaries are
         * related to the full frame; divide them by two when the format passes
         * fields in separate buffers.
         */
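        /*
         * For instance (illustrative numbers): with V4L2_FIELD_ALTERNATE each
         * buffer carries a single 1920x540 field of a 1080i stream, so the
         * height is clamped against FDP1_MIN_H / 2 and FDP1_MAX_H / 2 rather
         * than against the full-frame limits used for the other field orders.
         */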
        width = round_down(pix->width, fmt->hsub);
        pix->width = clamp(width, FDP1_MIN_W, FDP1_MAX_W);

        height = round_down(pix->height, fmt->vsub);
        if (pix->field == V4L2_FIELD_ALTERNATE)
                pix->height = clamp(height, FDP1_MIN_H / 2, FDP1_MAX_H / 2);
        else
                pix->height = clamp(height, FDP1_MIN_H, FDP1_MAX_H);

        fdp1_compute_stride(pix, fmt);
}

static void fdp1_try_fmt_capture(struct fdp1_ctx *ctx,
                                 const struct fdp1_fmt **fmtinfo,
                                 struct v4l2_pix_format_mplane *pix)
{
        struct fdp1_q_data *src_data = &ctx->out_q;
        enum v4l2_colorspace colorspace;
        enum v4l2_ycbcr_encoding ycbcr_enc;
        enum v4l2_quantization quantization;
        const struct fdp1_fmt *fmt;
        bool allow_rgb;

        /*
         * Validate the pixel format. We can only accept RGB output formats if
         * the input encoding and quantization are compatible with the format
         * conversions supported by the hardware. The supported combinations are
         *
         * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_LIM_RANGE
         * V4L2_YCBCR_ENC_601 + V4L2_QUANTIZATION_FULL_RANGE
         * V4L2_YCBCR_ENC_709 + V4L2_QUANTIZATION_LIM_RANGE
         */
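        /*
         * In other words (illustrative reading of the check below): a BT.601
         * source may be converted to RGB with either quantization, while a
         * BT.709 source is only converted when it uses limited range; any
         * other combination, such as full-range BT.709, causes an RGB request
         * to fall back to YUYV.
         */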
        colorspace = src_data->format.colorspace;

        ycbcr_enc = src_data->format.ycbcr_enc;
        if (ycbcr_enc == V4L2_YCBCR_ENC_DEFAULT)
                ycbcr_enc = V4L2_MAP_YCBCR_ENC_DEFAULT(colorspace);

        quantization = src_data->format.quantization;
        if (quantization == V4L2_QUANTIZATION_DEFAULT)
                quantization = V4L2_MAP_QUANTIZATION_DEFAULT(false, colorspace,
                                                             ycbcr_enc);

        allow_rgb = ycbcr_enc == V4L2_YCBCR_ENC_601 ||
                    (ycbcr_enc == V4L2_YCBCR_ENC_709 &&
                     quantization == V4L2_QUANTIZATION_LIM_RANGE);

        fmt = fdp1_find_format(pix->pixelformat);
        if (!fmt || (!allow_rgb && fdp1_fmt_is_rgb(fmt)))
                fmt = fdp1_find_format(V4L2_PIX_FMT_YUYV);

        if (fmtinfo)
                *fmtinfo = fmt;

        pix->pixelformat = fmt->fourcc;
        pix->num_planes = fmt->num_planes;
        pix->field = V4L2_FIELD_NONE;

        /*
         * The colorspace on the capture queue is copied from the output queue
         * as the hardware can't change the colorspace. It can convert YCbCr to
         * RGB though, in which case the encoding and quantization are set to
         * default values as anything else wouldn't make sense.
         */
        pix->colorspace = src_data->format.colorspace;
        pix->xfer_func = src_data->format.xfer_func;

        if (fdp1_fmt_is_rgb(fmt)) {
                pix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
                pix->quantization = V4L2_QUANTIZATION_DEFAULT;
        } else {
                pix->ycbcr_enc = src_data->format.ycbcr_enc;
                pix->quantization = src_data->format.quantization;
        }

        /*
         * The frame width is identical to the output queue, and the height is
         * either doubled or identical depending on whether the output queue
         * field order contains one or two fields per frame.
         */
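        /*
         * For example (illustrative): a V4L2_FIELD_ALTERNATE output queue
         * configured for 720x240 fields produces progressive 720x480 frames
         * on the capture queue, while the interleaved and sequential field
         * orders already describe a full frame and keep their height.
         */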
        pix->width = src_data->format.width;

        if (src_data->format.field == V4L2_FIELD_ALTERNATE)
                pix->height = 2 * src_data->format.height;
        else
                pix->height = src_data->format.height;

        fdp1_compute_stride(pix, fmt);
}

static int fdp1_try_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
        struct fdp1_ctx *ctx = fh_to_ctx(priv);

        if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                fdp1_try_fmt_output(ctx, NULL, &f->fmt.pix_mp);
        else
                fdp1_try_fmt_capture(ctx, NULL, &f->fmt.pix_mp);

        dprintk(ctx->fdp1, "Try %s format: %4.4s (0x%08x) %ux%u field %u\n",
                V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
                (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
                f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);

        return 0;
}

static void fdp1_set_format(struct fdp1_ctx *ctx,
                            struct v4l2_pix_format_mplane *pix,
                            enum v4l2_buf_type type)
{
        struct fdp1_q_data *q_data = get_q_data(ctx, type);
        const struct fdp1_fmt *fmtinfo;

        if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE)
                fdp1_try_fmt_output(ctx, &fmtinfo, pix);
        else
                fdp1_try_fmt_capture(ctx, &fmtinfo, pix);

        q_data->fmt = fmtinfo;
        q_data->format = *pix;

        q_data->vsize = pix->height;
        if (pix->field != V4L2_FIELD_NONE)
                q_data->vsize /= 2;

        q_data->stride_y = pix->plane_fmt[0].bytesperline;
        q_data->stride_c = pix->plane_fmt[1].bytesperline;

        /* Adjust strides for interleaved buffers */
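        /*
         * With both fields woven into one buffer, consecutive lines of a
         * single field are two buffer lines apart, so the per-field stride is
         * twice the frame stride (this is a reading of the code below, not a
         * hardware statement).
         */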
        if (pix->field == V4L2_FIELD_INTERLACED ||
            pix->field == V4L2_FIELD_INTERLACED_TB ||
            pix->field == V4L2_FIELD_INTERLACED_BT) {
                q_data->stride_y *= 2;
                q_data->stride_c *= 2;
        }

        /* Propagate the format from the output node to the capture node. */
        if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
                struct fdp1_q_data *dst_data = &ctx->cap_q;

                /*
                 * Copy the format, clear the per-plane bytes per line and image
                 * size, override the field and double the height if needed.
                 */
                dst_data->format = q_data->format;
                memset(dst_data->format.plane_fmt, 0,
                       sizeof(dst_data->format.plane_fmt));
                dst_data->format.field = V4L2_FIELD_NONE;
                if (pix->field == V4L2_FIELD_ALTERNATE)
                        dst_data->format.height *= 2;

                fdp1_try_fmt_capture(ctx, &dst_data->fmt, &dst_data->format);

                dst_data->vsize = dst_data->format.height;
                dst_data->stride_y = dst_data->format.plane_fmt[0].bytesperline;
                dst_data->stride_c = dst_data->format.plane_fmt[1].bytesperline;
        }
}

static int fdp1_s_fmt(struct file *file, void *priv, struct v4l2_format *f)
{
        struct fdp1_ctx *ctx = fh_to_ctx(priv);
        struct v4l2_m2m_ctx *m2m_ctx = ctx->fh.m2m_ctx;
        struct vb2_queue *vq = v4l2_m2m_get_vq(m2m_ctx, f->type);

        if (vb2_is_busy(vq)) {
                v4l2_err(&ctx->fdp1->v4l2_dev, "%s queue busy\n", __func__);
                return -EBUSY;
        }

        fdp1_set_format(ctx, &f->fmt.pix_mp, f->type);

        dprintk(ctx->fdp1, "Set %s format: %4.4s (0x%08x) %ux%u field %u\n",
                V4L2_TYPE_IS_OUTPUT(f->type) ? "output" : "capture",
                (char *)&f->fmt.pix_mp.pixelformat, f->fmt.pix_mp.pixelformat,
                f->fmt.pix_mp.width, f->fmt.pix_mp.height, f->fmt.pix_mp.field);

        return 0;
}

static int fdp1_g_ctrl(struct v4l2_ctrl *ctrl)
{
        struct fdp1_ctx *ctx =
                container_of(ctrl->handler, struct fdp1_ctx, hdl);
        struct fdp1_q_data *src_q_data = &ctx->out_q;

        switch (ctrl->id) {
        case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
                if (V4L2_FIELD_HAS_BOTH(src_q_data->format.field))
                        ctrl->val = 2;
                else
                        ctrl->val = 1;
                return 0;
        }

        return 1;
}

static int fdp1_s_ctrl(struct v4l2_ctrl *ctrl)
{
        struct fdp1_ctx *ctx =
                container_of(ctrl->handler, struct fdp1_ctx, hdl);

        switch (ctrl->id) {
        case V4L2_CID_ALPHA_COMPONENT:
                ctx->alpha = ctrl->val;
                break;
        case V4L2_CID_DEINTERLACING_MODE:
                ctx->deint_mode = ctrl->val;
                break;
        }

        return 0;
}

static const struct v4l2_ctrl_ops fdp1_ctrl_ops = {
        .s_ctrl = fdp1_s_ctrl,
        .g_volatile_ctrl = fdp1_g_ctrl,
};

static const char * const fdp1_ctrl_deint_menu[] = {
        "Progressive",
        "Adaptive 2D/3D",
        "Fixed 2D",
        "Fixed 3D",
        "Previous field",
        "Next field",
        NULL
};

static const struct v4l2_ioctl_ops fdp1_ioctl_ops = {
        .vidioc_querycap = fdp1_vidioc_querycap,

        .vidioc_enum_fmt_vid_cap = fdp1_enum_fmt_vid_cap,
        .vidioc_enum_fmt_vid_out = fdp1_enum_fmt_vid_out,
        .vidioc_g_fmt_vid_cap_mplane = fdp1_g_fmt,
        .vidioc_g_fmt_vid_out_mplane = fdp1_g_fmt,
        .vidioc_try_fmt_vid_cap_mplane = fdp1_try_fmt,
        .vidioc_try_fmt_vid_out_mplane = fdp1_try_fmt,
        .vidioc_s_fmt_vid_cap_mplane = fdp1_s_fmt,
        .vidioc_s_fmt_vid_out_mplane = fdp1_s_fmt,

        .vidioc_reqbufs = v4l2_m2m_ioctl_reqbufs,
        .vidioc_querybuf = v4l2_m2m_ioctl_querybuf,
        .vidioc_qbuf = v4l2_m2m_ioctl_qbuf,
        .vidioc_dqbuf = v4l2_m2m_ioctl_dqbuf,
        .vidioc_prepare_buf = v4l2_m2m_ioctl_prepare_buf,
        .vidioc_create_bufs = v4l2_m2m_ioctl_create_bufs,
        .vidioc_expbuf = v4l2_m2m_ioctl_expbuf,

        .vidioc_streamon = v4l2_m2m_ioctl_streamon,
        .vidioc_streamoff = v4l2_m2m_ioctl_streamoff,

        .vidioc_subscribe_event = v4l2_ctrl_subscribe_event,
        .vidioc_unsubscribe_event = v4l2_event_unsubscribe,
};

/*
 * Queue operations
 */

static int fdp1_queue_setup(struct vb2_queue *vq,
                            unsigned int *nbuffers, unsigned int *nplanes,
                            unsigned int sizes[],
                            struct device *alloc_ctxs[])
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(vq);
        struct fdp1_q_data *q_data;
        unsigned int i;

        q_data = get_q_data(ctx, vq->type);
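        /*
         * A non-zero *nplanes means the request comes from VIDIOC_CREATE_BUFS
         * with a caller-supplied format (as understood from the vb2
         * queue_setup contract); in that case only the plane count is
         * validated and the supplied sizes are kept.
         */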
        if (*nplanes) {
                if (*nplanes > FDP1_MAX_PLANES)
                        return -EINVAL;

                return 0;
        }

        *nplanes = q_data->format.num_planes;

        for (i = 0; i < *nplanes; i++)
                sizes[i] = q_data->format.plane_fmt[i].sizeimage;

        return 0;
}

static void fdp1_buf_prepare_field(struct fdp1_q_data *q_data,
                                   struct vb2_v4l2_buffer *vbuf,
                                   unsigned int field_num)
{
        struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
        struct fdp1_field_buffer *fbuf = &buf->fields[field_num];
        unsigned int num_fields;
        unsigned int i;

        num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;

        fbuf->vb = vbuf;
        fbuf->last_field = (field_num + 1) == num_fields;

        for (i = 0; i < vbuf->vb2_buf.num_planes; ++i)
                fbuf->addrs[i] = vb2_dma_contig_plane_dma_addr(&vbuf->vb2_buf, i);

        switch (vbuf->field) {
        case V4L2_FIELD_INTERLACED:
                /*
                 * Interlaced means bottom-top for 60Hz TV standards (NTSC) and
                 * top-bottom for 50Hz. As TV standards are not applicable to
                 * the mem-to-mem API, use the height as a heuristic.
                 */
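                /*
                 * Illustrative check of the heuristic below: a 480-line frame
                 * (height < 576) yields BOTTOM for field 0 and TOP for field
                 * 1, matching NTSC bottom-field-first; a 576-line frame yields
                 * TOP then BOTTOM, matching PAL top-field-first.
                 */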
                fbuf->field = (q_data->format.height < 576) == field_num
                            ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
                break;
        case V4L2_FIELD_INTERLACED_TB:
        case V4L2_FIELD_SEQ_TB:
                fbuf->field = field_num ? V4L2_FIELD_BOTTOM : V4L2_FIELD_TOP;
                break;
        case V4L2_FIELD_INTERLACED_BT:
        case V4L2_FIELD_SEQ_BT:
                fbuf->field = field_num ? V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
                break;
        default:
                fbuf->field = vbuf->field;
                break;
        }

        /* The first field needs no address adjustment. */
        if (!field_num)
                return;

        /* Adjust buffer addresses for second field */
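        /*
         * For an interleaved buffer the second field starts one line further
         * down, i.e. one stride into each plane; for the sequential layouts it
         * starts after the whole first field, i.e. vsize lines in (a worked
         * reading of the switch below, not a hardware statement).
         */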
        switch (vbuf->field) {
        case V4L2_FIELD_INTERLACED:
        case V4L2_FIELD_INTERLACED_TB:
        case V4L2_FIELD_INTERLACED_BT:
                for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
                        fbuf->addrs[i] +=
                                (i == 0 ? q_data->stride_y : q_data->stride_c);
                break;
        case V4L2_FIELD_SEQ_TB:
        case V4L2_FIELD_SEQ_BT:
                for (i = 0; i < vbuf->vb2_buf.num_planes; i++)
                        fbuf->addrs[i] += q_data->vsize *
                                (i == 0 ? q_data->stride_y : q_data->stride_c);
                break;
        }
}

static int fdp1_buf_prepare(struct vb2_buffer *vb)
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
        struct fdp1_q_data *q_data = get_q_data(ctx, vb->vb2_queue->type);
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fdp1_buffer *buf = to_fdp1_buffer(vbuf);
        unsigned int i;

        if (V4L2_TYPE_IS_OUTPUT(vb->vb2_queue->type)) {
                bool field_valid = true;

                /* Validate the buffer field. */
                switch (q_data->format.field) {
                case V4L2_FIELD_NONE:
                        if (vbuf->field != V4L2_FIELD_NONE)
                                field_valid = false;
                        break;
                case V4L2_FIELD_ALTERNATE:
                        if (vbuf->field != V4L2_FIELD_TOP &&
                            vbuf->field != V4L2_FIELD_BOTTOM)
                                field_valid = false;
                        break;
                case V4L2_FIELD_INTERLACED:
                case V4L2_FIELD_SEQ_TB:
                case V4L2_FIELD_SEQ_BT:
                case V4L2_FIELD_INTERLACED_TB:
                case V4L2_FIELD_INTERLACED_BT:
                        if (vbuf->field != q_data->format.field)
                                field_valid = false;
                        break;
                }

                if (!field_valid) {
                        dprintk(ctx->fdp1,
                                "buffer field %u invalid for format field %u\n",
                                vbuf->field, q_data->format.field);
                        return -EINVAL;
                }
        } else {
                vbuf->field = V4L2_FIELD_NONE;
        }

        /* Validate the plane sizes. */
        for (i = 0; i < q_data->format.num_planes; i++) {
                unsigned long size = q_data->format.plane_fmt[i].sizeimage;

                if (vb2_plane_size(vb, i) < size) {
                        dprintk(ctx->fdp1,
                                "data will not fit into plane [%u/%u] (%lu < %lu)\n",
                                i, q_data->format.num_planes,
                                vb2_plane_size(vb, i), size);
                        return -EINVAL;
                }

                /* We have known size formats all around */
                vb2_set_plane_payload(vb, i, size);
        }

        buf->num_fields = V4L2_FIELD_HAS_BOTH(vbuf->field) ? 2 : 1;
        for (i = 0; i < buf->num_fields; ++i)
                fdp1_buf_prepare_field(q_data, vbuf, i);

        return 0;
}

static void fdp1_buf_queue(struct vb2_buffer *vb)
{
        struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
        struct fdp1_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);

        v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vbuf);
}

static int fdp1_start_streaming(struct vb2_queue *q, unsigned int count)
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
        struct fdp1_q_data *q_data = get_q_data(ctx, q->type);

        if (V4L2_TYPE_IS_OUTPUT(q->type)) {
                /*
                 * Force our deint_mode when we are progressive, ignoring any
                 * setting on the device from the user. Otherwise, lock in the
                 * requested de-interlace mode.
                 */
                if (q_data->format.field == V4L2_FIELD_NONE)
                        ctx->deint_mode = FDP1_PROGRESSIVE;

                if (ctx->deint_mode == FDP1_ADAPT2D3D) {
                        u32 stride;
                        dma_addr_t smsk_base;
                        const u32 bpp = 2; /* bytes per pixel */

                        stride = round_up(q_data->format.width, 8);

                        ctx->smsk_size = bpp * stride * q_data->vsize;
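                        /*
                         * Rough size check (illustrative numbers): a 1920x1080
                         * interlaced source has vsize = 540 and a stride
                         * rounded up to 1920, so the mask buffer needs about
                         * 2 MiB (2 * 1920 * 540 bytes).
                         */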
                        ctx->smsk_cpu = dma_alloc_coherent(ctx->fdp1->dev,
                                        ctx->smsk_size, &smsk_base, GFP_KERNEL);
                        if (ctx->smsk_cpu == NULL) {
                                dprintk(ctx->fdp1, "Failed to alloc smsk\n");
                                return -ENOMEM;
                        }

                        ctx->smsk_addr[0] = smsk_base;
                        ctx->smsk_addr[1] = smsk_base + (ctx->smsk_size/2);
                }
        }

        return 0;
}

static void fdp1_stop_streaming(struct vb2_queue *q)
{
        struct fdp1_ctx *ctx = vb2_get_drv_priv(q);
        struct vb2_v4l2_buffer *vbuf;
        unsigned long flags;

        while (1) {
                if (V4L2_TYPE_IS_OUTPUT(q->type))
                        vbuf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
                else
                        vbuf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
                if (vbuf == NULL)
                        break;
                spin_lock_irqsave(&ctx->fdp1->irqlock, flags);
                v4l2_m2m_buf_done(vbuf, VB2_BUF_STATE_ERROR);
                spin_unlock_irqrestore(&ctx->fdp1->irqlock, flags);
        }

        /* Empty Output queues */
        if (V4L2_TYPE_IS_OUTPUT(q->type)) {
                /* Empty our internal queues */
                struct fdp1_field_buffer *fbuf;

                /* Free any queued buffers */
                fbuf = fdp1_dequeue_field(ctx);
                while (fbuf != NULL) {
                        fdp1_field_complete(ctx, fbuf);
                        fbuf = fdp1_dequeue_field(ctx);
                }

                /* Free smsk_data */
                if (ctx->smsk_cpu) {
                        dma_free_coherent(ctx->fdp1->dev, ctx->smsk_size,
                                          ctx->smsk_cpu, ctx->smsk_addr[0]);
                        ctx->smsk_addr[0] = ctx->smsk_addr[1] = 0;
                        ctx->smsk_cpu = NULL;
                }

                WARN(!list_empty(&ctx->fields_queue),
                     "Buffer queue not empty");
        } else {
                /* Empty Capture queues (Jobs) */
                struct fdp1_job *job;

                job = get_queued_job(ctx->fdp1);
                while (job) {
                        if (FDP1_DEINT_MODE_USES_PREV(ctx->deint_mode))
                                fdp1_field_complete(ctx, job->previous);
                        else
                                fdp1_field_complete(ctx, job->active);

                        v4l2_m2m_buf_done(job->dst->vb, VB2_BUF_STATE_ERROR);
                        job->dst = NULL;

                        job = get_queued_job(ctx->fdp1);
                }

                /* Free any held buffer in the ctx */
                fdp1_field_complete(ctx, ctx->previous);

                WARN(!list_empty(&ctx->fdp1->queued_job_list),
                     "Queued Job List not empty");

                WARN(!list_empty(&ctx->fdp1->hw_job_list),
                     "HW Job list not empty");
        }
}

static const struct vb2_ops fdp1_qops = {
        .queue_setup = fdp1_queue_setup,
        .buf_prepare = fdp1_buf_prepare,
        .buf_queue = fdp1_buf_queue,
        .start_streaming = fdp1_start_streaming,
        .stop_streaming = fdp1_stop_streaming,
        .wait_prepare = vb2_ops_wait_prepare,
        .wait_finish = vb2_ops_wait_finish,
};

static int queue_init(void *priv, struct vb2_queue *src_vq,
                      struct vb2_queue *dst_vq)
{
        struct fdp1_ctx *ctx = priv;
        int ret;

        src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        src_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        src_vq->drv_priv = ctx;
        src_vq->buf_struct_size = sizeof(struct fdp1_buffer);
        src_vq->ops = &fdp1_qops;
        src_vq->mem_ops = &vb2_dma_contig_memops;
        src_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        src_vq->lock = &ctx->fdp1->dev_mutex;
        src_vq->dev = ctx->fdp1->dev;

        ret = vb2_queue_init(src_vq);
        if (ret)
                return ret;

        dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
        dst_vq->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
        dst_vq->drv_priv = ctx;
        dst_vq->buf_struct_size = sizeof(struct fdp1_buffer);
        dst_vq->ops = &fdp1_qops;
        dst_vq->mem_ops = &vb2_dma_contig_memops;
        dst_vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY;
        dst_vq->lock = &ctx->fdp1->dev_mutex;
        dst_vq->dev = ctx->fdp1->dev;

        return vb2_queue_init(dst_vq);
}

/*
 * File operations
 */

static int fdp1_open(struct file *file)
{
        struct fdp1_dev *fdp1 = video_drvdata(file);
        struct v4l2_pix_format_mplane format;
        struct fdp1_ctx *ctx = NULL;
        struct v4l2_ctrl *ctrl;
        int ret = 0;

        if (mutex_lock_interruptible(&fdp1->dev_mutex))
                return -ERESTARTSYS;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx) {
                ret = -ENOMEM;
                goto done;
        }

        v4l2_fh_init(&ctx->fh, video_devdata(file));
        file->private_data = &ctx->fh;
        ctx->fdp1 = fdp1;

        /* Initialise Queues */
        INIT_LIST_HEAD(&ctx->fields_queue);

        ctx->translen = 1;
        ctx->sequence = 0;

        /* Initialise controls */
        v4l2_ctrl_handler_init(&ctx->hdl, 3);
        v4l2_ctrl_new_std_menu_items(&ctx->hdl, &fdp1_ctrl_ops,
                                     V4L2_CID_DEINTERLACING_MODE,
                                     FDP1_NEXTFIELD, BIT(0), FDP1_FIXED3D,
                                     fdp1_ctrl_deint_menu);

        ctrl = v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
                                 V4L2_CID_MIN_BUFFERS_FOR_CAPTURE, 1, 2, 1, 1);
        if (ctrl)
                ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;

        v4l2_ctrl_new_std(&ctx->hdl, &fdp1_ctrl_ops,
                          V4L2_CID_ALPHA_COMPONENT, 0, 255, 1, 255);

        if (ctx->hdl.error) {
                ret = ctx->hdl.error;
                v4l2_ctrl_handler_free(&ctx->hdl);
                kfree(ctx);
                goto done;
        }

        ctx->fh.ctrl_handler = &ctx->hdl;
        v4l2_ctrl_handler_setup(&ctx->hdl);

        /* Configure default parameters. */
        memset(&format, 0, sizeof(format));
        fdp1_set_format(ctx, &format, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);

        ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fdp1->m2m_dev, ctx, &queue_init);
        if (IS_ERR(ctx->fh.m2m_ctx)) {
                ret = PTR_ERR(ctx->fh.m2m_ctx);
                v4l2_ctrl_handler_free(&ctx->hdl);
                kfree(ctx);
                goto done;
        }

        /* Perform any power management required */
        pm_runtime_get_sync(fdp1->dev);

        v4l2_fh_add(&ctx->fh);

        dprintk(fdp1, "Created instance: %p, m2m_ctx: %p\n",
                ctx, ctx->fh.m2m_ctx);

done:
        mutex_unlock(&fdp1->dev_mutex);
        return ret;
}

static int fdp1_release(struct file *file)
{
        struct fdp1_dev *fdp1 = video_drvdata(file);
        struct fdp1_ctx *ctx = fh_to_ctx(file->private_data);

        dprintk(fdp1, "Releasing instance %p\n", ctx);

        v4l2_fh_del(&ctx->fh);
        v4l2_fh_exit(&ctx->fh);
        v4l2_ctrl_handler_free(&ctx->hdl);
        mutex_lock(&fdp1->dev_mutex);
        v4l2_m2m_ctx_release(ctx->fh.m2m_ctx);
        mutex_unlock(&fdp1->dev_mutex);
        kfree(ctx);

        pm_runtime_put(fdp1->dev);

        return 0;
}

static const struct v4l2_file_operations fdp1_fops = {
        .owner = THIS_MODULE,
        .open = fdp1_open,
        .release = fdp1_release,
        .poll = v4l2_m2m_fop_poll,
        .unlocked_ioctl = video_ioctl2,
        .mmap = v4l2_m2m_fop_mmap,
};

static const struct video_device fdp1_videodev = {
        .name = DRIVER_NAME,
        .vfl_dir = VFL_DIR_M2M,
        .fops = &fdp1_fops,
        .device_caps = V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING,
        .ioctl_ops = &fdp1_ioctl_ops,
        .minor = -1,
        .release = video_device_release_empty,
};

static const struct v4l2_m2m_ops m2m_ops = {
        .device_run = fdp1_m2m_device_run,
        .job_ready = fdp1_m2m_job_ready,
        .job_abort = fdp1_m2m_job_abort,
};

static irqreturn_t fdp1_irq_handler(int irq, void *dev_id)
{
        struct fdp1_dev *fdp1 = dev_id;
        u32 int_status;
        u32 ctl_status;
        u32 vint_cnt;
        u32 cycles;

        int_status = fdp1_read(fdp1, FD1_CTL_IRQSTA);
        cycles = fdp1_read(fdp1, FD1_CTL_VCYCLE_STAT);
        ctl_status = fdp1_read(fdp1, FD1_CTL_STATUS);
        vint_cnt = (ctl_status & FD1_CTL_STATUS_VINT_CNT_MASK) >>
                        FD1_CTL_STATUS_VINT_CNT_SHIFT;

        /* Clear interrupts */
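        /*
         * FD1_CTL_IRQSTA is treated here as write-zero-to-clear: writing the
         * inverted status masked to the interrupt bits clears exactly the
         * sources that were latched and leaves the others untouched (a reading
         * of the statement below, not a datasheet citation).
         */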
        fdp1_write(fdp1, ~(int_status) & FD1_CTL_IRQ_MASK, FD1_CTL_IRQSTA);

        if (debug >= 2) {
                dprintk(fdp1, "IRQ: 0x%x %s%s%s\n", int_status,
                        int_status & FD1_CTL_IRQ_VERE ? "[Error]" : "[!E]",
                        int_status & FD1_CTL_IRQ_VINTE ? "[VSync]" : "[!V]",
                        int_status & FD1_CTL_IRQ_FREE ? "[FrameEnd]" : "[!F]");

                dprintk(fdp1, "CycleStatus = %d (%dms)\n",
                        cycles, cycles/(fdp1->clk_rate/1000));

                dprintk(fdp1,
                        "Control Status = 0x%08x : VINT_CNT = %d %s:%s:%s:%s\n",
                        ctl_status, vint_cnt,
                        ctl_status & FD1_CTL_STATUS_SGREGSET ? "RegSet" : "",
                        ctl_status & FD1_CTL_STATUS_SGVERR ? "Vsync Error" : "",
                        ctl_status & FD1_CTL_STATUS_SGFREND ? "FrameEnd" : "",
                        ctl_status & FD1_CTL_STATUS_BSY ? "Busy" : "");
                dprintk(fdp1, "***********************************\n");
        }

        /* Spurious interrupt */
        if (!(FD1_CTL_IRQ_MASK & int_status))
                return IRQ_NONE;

        /* Work completed, release the frame */
        if (FD1_CTL_IRQ_VERE & int_status)
                device_frame_end(fdp1, VB2_BUF_STATE_ERROR);
        else if (FD1_CTL_IRQ_FREE & int_status)
                device_frame_end(fdp1, VB2_BUF_STATE_DONE);

        return IRQ_HANDLED;
}

static int fdp1_probe(struct platform_device *pdev)
{
        struct fdp1_dev *fdp1;
        struct video_device *vfd;
        struct device_node *fcp_node;
        struct resource *res;
        struct clk *clk;
        unsigned int i;
        int ret;
        int hw_version;

        fdp1 = devm_kzalloc(&pdev->dev, sizeof(*fdp1), GFP_KERNEL);
        if (!fdp1)
                return -ENOMEM;

        INIT_LIST_HEAD(&fdp1->free_job_list);
        INIT_LIST_HEAD(&fdp1->queued_job_list);
        INIT_LIST_HEAD(&fdp1->hw_job_list);

        /* Initialise the jobs on the free list */
        for (i = 0; i < ARRAY_SIZE(fdp1->jobs); i++)
                list_add(&fdp1->jobs[i].list, &fdp1->free_job_list);

        mutex_init(&fdp1->dev_mutex);

        spin_lock_init(&fdp1->irqlock);
        spin_lock_init(&fdp1->device_process_lock);
        fdp1->dev = &pdev->dev;
        platform_set_drvdata(pdev, fdp1);

        /* Memory-mapped registers */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        fdp1->regs = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(fdp1->regs))
                return PTR_ERR(fdp1->regs);

        /* Interrupt service routine registration */
        fdp1->irq = ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(&pdev->dev, "cannot find IRQ\n");
                return ret;
        }

        ret = devm_request_irq(&pdev->dev, fdp1->irq, fdp1_irq_handler, 0,
                               dev_name(&pdev->dev), fdp1);
        if (ret) {
                dev_err(&pdev->dev, "cannot claim IRQ %d\n", fdp1->irq);
                return ret;
        }

        /* FCP */
        fcp_node = of_parse_phandle(pdev->dev.of_node, "renesas,fcp", 0);
        if (fcp_node) {
                fdp1->fcp = rcar_fcp_get(fcp_node);
                of_node_put(fcp_node);
                if (IS_ERR(fdp1->fcp)) {
                        dev_dbg(&pdev->dev, "FCP not found (%ld)\n",
                                PTR_ERR(fdp1->fcp));
                        return PTR_ERR(fdp1->fcp);
                }
        }

        /* Determine our clock rate */
        clk = clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        fdp1->clk_rate = clk_get_rate(clk);
        clk_put(clk);

        /* V4L2 device registration */
        ret = v4l2_device_register(&pdev->dev, &fdp1->v4l2_dev);
        if (ret) {
                v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
                return ret;
        }

        /* M2M registration */
        fdp1->m2m_dev = v4l2_m2m_init(&m2m_ops);
        if (IS_ERR(fdp1->m2m_dev)) {
                v4l2_err(&fdp1->v4l2_dev, "Failed to init mem2mem device\n");
                ret = PTR_ERR(fdp1->m2m_dev);
                goto unreg_dev;
        }

        /* Video registration */
        fdp1->vfd = fdp1_videodev;
        vfd = &fdp1->vfd;
        vfd->lock = &fdp1->dev_mutex;
        vfd->v4l2_dev = &fdp1->v4l2_dev;
        video_set_drvdata(vfd, fdp1);
        strscpy(vfd->name, fdp1_videodev.name, sizeof(vfd->name));

        ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
        if (ret) {
                v4l2_err(&fdp1->v4l2_dev, "Failed to register video device\n");
                goto release_m2m;
        }

        v4l2_info(&fdp1->v4l2_dev, "Device registered as /dev/video%d\n",
                  vfd->num);

        /* Power up the cells to read HW */
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(fdp1->dev);

        hw_version = fdp1_read(fdp1, FD1_IP_INTDATA);
        switch (hw_version) {
        case FD1_IP_H3_ES1:
                dprintk(fdp1, "FDP1 Version R-Car H3 ES1\n");
                break;
        case FD1_IP_M3W:
                dprintk(fdp1, "FDP1 Version R-Car M3-W\n");
                break;
        case FD1_IP_H3:
                dprintk(fdp1, "FDP1 Version R-Car H3\n");
                break;
        case FD1_IP_M3N:
                dprintk(fdp1, "FDP1 Version R-Car M3-N\n");
                break;
        case FD1_IP_E3:
                dprintk(fdp1, "FDP1 Version R-Car E3\n");
                break;
        default:
                dev_err(fdp1->dev, "FDP1 Unidentifiable (0x%08x)\n",
                        hw_version);
        }

        /* Allow the hw to sleep until an open call puts it to use */
        pm_runtime_put(fdp1->dev);

        return 0;

release_m2m:
        v4l2_m2m_release(fdp1->m2m_dev);

unreg_dev:
        v4l2_device_unregister(&fdp1->v4l2_dev);

        return ret;
}

static int fdp1_remove(struct platform_device *pdev)
{
        struct fdp1_dev *fdp1 = platform_get_drvdata(pdev);

        v4l2_m2m_release(fdp1->m2m_dev);
        video_unregister_device(&fdp1->vfd);
        v4l2_device_unregister(&fdp1->v4l2_dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

static int __maybe_unused fdp1_pm_runtime_suspend(struct device *dev)
{
        struct fdp1_dev *fdp1 = dev_get_drvdata(dev);

        rcar_fcp_disable(fdp1->fcp);

        return 0;
}

static int __maybe_unused fdp1_pm_runtime_resume(struct device *dev)
{
        struct fdp1_dev *fdp1 = dev_get_drvdata(dev);

        /* Program in the static LUTs */
        fdp1_set_lut(fdp1);

        return rcar_fcp_enable(fdp1->fcp);
}

static const struct dev_pm_ops fdp1_pm_ops = {
        SET_RUNTIME_PM_OPS(fdp1_pm_runtime_suspend,
                           fdp1_pm_runtime_resume,
                           NULL)
};

static const struct of_device_id fdp1_dt_ids[] = {
        { .compatible = "renesas,fdp1" },
        { },
};
MODULE_DEVICE_TABLE(of, fdp1_dt_ids);

static struct platform_driver fdp1_pdrv = {
        .probe = fdp1_probe,
        .remove = fdp1_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = fdp1_dt_ids,
                .pm = &fdp1_pm_ops,
        },
};
module_platform_driver(fdp1_pdrv);

MODULE_DESCRIPTION("Renesas R-Car Fine Display Processor Driver");
MODULE_AUTHOR("Kieran Bingham <kieran@bingham.xyz>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);