vivid-vid-cap.c

/*
 * vivid-vid-cap.c - video capture support functions.
 *
 * Copyright 2014 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/videodev2.h>
#include <linux/v4l2-dv-timings.h>
#include <media/v4l2-common.h>
#include <media/v4l2-event.h>
#include <media/v4l2-dv-timings.h>
#include <media/v4l2-rect.h>

#include "vivid-core.h"
#include "vivid-vid-common.h"
#include "vivid-kthread-cap.h"
#include "vivid-vid-cap.h"

/* timeperframe: min/max and default */
static const struct v4l2_fract
	tpf_min = {.numerator = 1, .denominator = FPS_MAX},
	tpf_max = {.numerator = FPS_MAX, .denominator = 1};

static const struct vivid_fmt formats_ovl[] = {
	{
		.fourcc = V4L2_PIX_FMT_RGB565, /* gggbbbbb rrrrrggg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
	{
		.fourcc = V4L2_PIX_FMT_XRGB555, /* gggbbbbb arrrrrgg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
	{
		.fourcc = V4L2_PIX_FMT_ARGB555, /* gggbbbbb arrrrrgg */
		.vdownsampling = { 1 },
		.bit_depth = { 16 },
		.planes = 1,
		.buffers = 1,
	},
};

/* The number of discrete webcam framesizes */
#define VIVID_WEBCAM_SIZES 5
/* The number of discrete webcam frameintervals */
#define VIVID_WEBCAM_IVALS (VIVID_WEBCAM_SIZES * 2)

/* Sizes must be in increasing order */
static const struct v4l2_frmsize_discrete webcam_sizes[VIVID_WEBCAM_SIZES] = {
	{ 320, 180 },
	{ 640, 360 },
	{ 1280, 720 },
	{ 1920, 1080 },
	{ 3840, 2160 },
};

/*
 * Intervals must be in increasing order and there must be twice as many
 * elements in this array as there are in webcam_sizes.
 */
static const struct v4l2_fract webcam_intervals[VIVID_WEBCAM_IVALS] = {
	{ 1, 1 },
	{ 1, 2 },
	{ 1, 4 },
	{ 1, 5 },
	{ 1, 10 },
	{ 1, 15 },
	{ 1, 25 },
	{ 1, 30 },
	{ 1, 50 },
	{ 1, 60 },
};

static const struct v4l2_discrete_probe webcam_probe = {
	webcam_sizes,
	VIVID_WEBCAM_SIZES
};

static int vid_cap_queue_setup(struct vb2_queue *vq,
			       unsigned *nbuffers, unsigned *nplanes,
			       unsigned sizes[], struct device *alloc_devs[])
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned h = dev->fmt_cap_rect.height;
	unsigned p;

	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * You cannot use read() with FIELD_ALTERNATE since the field
		 * information (TOP/BOTTOM) cannot be passed back to the user.
		 */
		if (vb2_fileio_is_active(vq))
			return -EINVAL;
	}
	if (dev->queue_setup_error) {
		/*
		 * Error injection: test what happens if queue_setup() returns
		 * an error.
		 */
		dev->queue_setup_error = false;
		return -EINVAL;
	}
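	/*
	 * If a plane count was requested, validate it and the plane sizes
	 * against the current format; otherwise report the minimum size per
	 * plane: the TPG line width times the frame height plus the
	 * per-plane data offset.
	 */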
	if (*nplanes) {
		/*
		 * Check if the number of requested planes match
		 * the number of buffers in the current format. You can't mix that.
		 */
		if (*nplanes != buffers)
			return -EINVAL;
		for (p = 0; p < buffers; p++) {
			if (sizes[p] < tpg_g_line_width(&dev->tpg, p) * h +
					dev->fmt_cap->data_offset[p])
				return -EINVAL;
		}
	} else {
		for (p = 0; p < buffers; p++)
			sizes[p] = tpg_g_line_width(&dev->tpg, p) * h +
				dev->fmt_cap->data_offset[p];
	}

	if (vq->num_buffers + *nbuffers < 2)
		*nbuffers = 2 - vq->num_buffers;

	*nplanes = buffers;

	dprintk(dev, 1, "%s: count=%d\n", __func__, *nbuffers);
	for (p = 0; p < buffers; p++)
		dprintk(dev, 1, "%s: size[%u]=%u\n", __func__, p, sizes[p]);
	return 0;
}

static int vid_cap_buf_prepare(struct vb2_buffer *vb)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long size;
	unsigned buffers = tpg_g_buffers(&dev->tpg);
	unsigned p;

	dprintk(dev, 1, "%s\n", __func__);

	if (WARN_ON(NULL == dev->fmt_cap))
		return -EINVAL;

	if (dev->buf_prepare_error) {
		/*
		 * Error injection: test what happens if buf_prepare() returns
		 * an error.
		 */
		dev->buf_prepare_error = false;
		return -EINVAL;
	}
	for (p = 0; p < buffers; p++) {
		size = tpg_g_line_width(&dev->tpg, p) * dev->fmt_cap_rect.height +
			dev->fmt_cap->data_offset[p];

		if (vb2_plane_size(vb, p) < size) {
			dprintk(dev, 1, "%s data will not fit into plane %u (%lu < %lu)\n",
					__func__, p, vb2_plane_size(vb, p), size);
			return -EINVAL;
		}

		vb2_set_plane_payload(vb, p, size);
		vb->planes[p].data_offset = dev->fmt_cap->data_offset[p];
	}

	return 0;
}

static void vid_cap_buf_finish(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct v4l2_timecode *tc = &vbuf->timecode;
	unsigned fps = 25;
	unsigned seq = vbuf->sequence;

	if (!vivid_is_sdtv_cap(dev))
		return;

	/*
	 * Set the timecode. Rarely used, so it is interesting to
	 * test this.
	 */
	vbuf->flags |= V4L2_BUF_FLAG_TIMECODE;
	if (dev->std_cap & V4L2_STD_525_60)
		fps = 30;
	tc->type = (fps == 30) ? V4L2_TC_TYPE_30FPS : V4L2_TC_TYPE_25FPS;
	tc->flags = 0;
	tc->frames = seq % fps;
	tc->seconds = (seq / fps) % 60;
	tc->minutes = (seq / (60 * fps)) % 60;
	tc->hours = (seq / (60 * 60 * fps)) % 24;
}

static void vid_cap_buf_queue(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct vivid_dev *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct vivid_buffer *buf = container_of(vbuf, struct vivid_buffer, vb);

	dprintk(dev, 1, "%s\n", __func__);

	spin_lock(&dev->slock);
	list_add_tail(&buf->list, &dev->vid_cap_active);
	spin_unlock(&dev->slock);
}

static int vid_cap_start_streaming(struct vb2_queue *vq, unsigned count)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);
	unsigned i;
	int err;

	if (vb2_is_streaming(&dev->vb_vid_out_q))
		dev->can_loop_video = vivid_vid_can_loop(dev);

	dev->vid_cap_seq_count = 0;
	dprintk(dev, 1, "%s\n", __func__);
	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		dev->must_blank[i] = tpg_g_perc_fill(&dev->tpg) < 100;
	if (dev->start_streaming_error) {
		dev->start_streaming_error = false;
		err = -EINVAL;
	} else {
		err = vivid_start_generating_vid_cap(dev, &dev->vid_cap_streaming);
	}
	if (err) {
		struct vivid_buffer *buf, *tmp;

		list_for_each_entry_safe(buf, tmp, &dev->vid_cap_active, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb.vb2_buf,
					VB2_BUF_STATE_QUEUED);
		}
	}
	return err;
}

/* abort streaming and wait for last buffer */
static void vid_cap_stop_streaming(struct vb2_queue *vq)
{
	struct vivid_dev *dev = vb2_get_drv_priv(vq);

	dprintk(dev, 1, "%s\n", __func__);
	vivid_stop_generating_vid_cap(dev, &dev->vid_cap_streaming);
	dev->can_loop_video = false;
}

const struct vb2_ops vivid_vid_cap_qops = {
	.queue_setup = vid_cap_queue_setup,
	.buf_prepare = vid_cap_buf_prepare,
	.buf_finish = vid_cap_buf_finish,
	.buf_queue = vid_cap_buf_queue,
	.start_streaming = vid_cap_start_streaming,
	.stop_streaming = vid_cap_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};

/*
 * Determine the 'picture' quality based on the current TV frequency: either
 * COLOR for a good 'signal', GRAY (grayscale picture) for a slightly off
 * signal or NOISE for no signal.
 */
void vivid_update_quality(struct vivid_dev *dev)
{
	unsigned freq_modulus;

	if (dev->loop_video && (vivid_is_svid_cap(dev) || vivid_is_hdmi_cap(dev))) {
		/*
		 * The 'noise' will only be replaced by the actual video
		 * if the output video matches the input video settings.
		 */
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_hdmi_cap(dev) && VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (vivid_is_sdtv_cap(dev) && VIVID_INVALID_SIGNAL(dev->std_signal_mode)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE, 0);
		return;
	}
	if (!vivid_is_tv_cap(dev)) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
		return;
	}

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just noise.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (freq_modulus > 2 * 16) {
		tpg_s_quality(&dev->tpg, TPG_QUAL_NOISE,
			next_pseudo_random32(dev->tv_freq ^ 0x55) & 0x3f);
		return;
	}
	if (freq_modulus < 12 /*0.75 * 16*/ || freq_modulus > 20 /*1.25 * 16*/)
		tpg_s_quality(&dev->tpg, TPG_QUAL_GRAY, 0);
	else
		tpg_s_quality(&dev->tpg, TPG_QUAL_COLOR, 0);
}

/*
 * Get the current picture quality and the associated afc value.
 */
static enum tpg_quality vivid_get_quality(struct vivid_dev *dev, s32 *afc)
{
	unsigned freq_modulus;

	if (afc)
		*afc = 0;
	if (tpg_g_quality(&dev->tpg) == TPG_QUAL_COLOR ||
	    tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE)
		return tpg_g_quality(&dev->tpg);

	/*
	 * There is a fake channel every 6 MHz at 49.25, 55.25, etc.
	 * From +/- 0.25 MHz around the channel there is color, and from
	 * +/- 1 MHz there is grayscale (chroma is lost).
	 * Everywhere else it is just gray.
	 */
	freq_modulus = (dev->tv_freq - 676 /* (43.25-1) * 16 */) % (6 * 16);
	if (afc)
		*afc = freq_modulus - 1 * 16;
	return TPG_QUAL_GRAY;
}

enum tpg_video_aspect vivid_get_video_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return dev->std_aspect_ratio;

	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_aspect_ratio;

	return TPG_VIDEO_ASPECT_IMAGE;
}

static enum tpg_pixel_aspect vivid_get_pixel_aspect(const struct vivid_dev *dev)
{
	if (vivid_is_sdtv_cap(dev))
		return (dev->std_cap & V4L2_STD_525_60) ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	if (vivid_is_hdmi_cap(dev) &&
	    dev->src_rect.width == 720 && dev->src_rect.height <= 576)
		return dev->src_rect.height == 480 ?
			TPG_PIXEL_ASPECT_NTSC : TPG_PIXEL_ASPECT_PAL;

	return TPG_PIXEL_ASPECT_SQUARE;
}

/*
 * Called whenever the format has to be reset which can occur when
 * changing inputs, standard, timings, etc.
 */
void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls)
{
	struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
	unsigned size;
	u64 pixelclock;

	switch (dev->input_type[dev->input]) {
	case WEBCAM:
	default:
		dev->src_rect.width = webcam_sizes[dev->webcam_size_idx].width;
		dev->src_rect.height = webcam_sizes[dev->webcam_size_idx].height;
		dev->timeperframe_vid_cap = webcam_intervals[dev->webcam_ival_idx];
		dev->field_cap = V4L2_FIELD_NONE;
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case TV:
	case SVID:
		dev->field_cap = dev->tv_field_cap;
		dev->src_rect.width = 720;
		if (dev->std_cap & V4L2_STD_525_60) {
			dev->src_rect.height = 480;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1001, 30000 };
			dev->service_set_cap = V4L2_SLICED_CAPTION_525;
		} else {
			dev->src_rect.height = 576;
			dev->timeperframe_vid_cap = (struct v4l2_fract) { 1000, 25000 };
			dev->service_set_cap = V4L2_SLICED_WSS_625 | V4L2_SLICED_TELETEXT_B;
		}
		tpg_s_rgb_range(&dev->tpg, V4L2_DV_RGB_RANGE_AUTO);
		break;
	case HDMI:
		dev->src_rect.width = bt->width;
		dev->src_rect.height = bt->height;
		size = V4L2_DV_BT_FRAME_WIDTH(bt) * V4L2_DV_BT_FRAME_HEIGHT(bt);
		if (dev->reduced_fps && can_reduce_fps(bt)) {
			pixelclock = div_u64(bt->pixelclock * 1000, 1001);
			bt->flags |= V4L2_DV_FL_REDUCED_FPS;
		} else {
			pixelclock = bt->pixelclock;
			bt->flags &= ~V4L2_DV_FL_REDUCED_FPS;
		}
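		/*
		 * The frame period is the total frame size (including
		 * blanking) divided by the pixel clock; both values are
		 * scaled down by 100 before being stored in the fraction.
		 */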
		dev->timeperframe_vid_cap = (struct v4l2_fract) {
			size / 100, (u32)pixelclock / 100
		};
		if (bt->interlaced)
			dev->field_cap = V4L2_FIELD_ALTERNATE;
		else
			dev->field_cap = V4L2_FIELD_NONE;

		/*
		 * We can be called from within s_ctrl, in that case we can't
		 * set/get controls. Luckily we don't need to in that case.
		 */
		if (keep_controls || !dev->colorspace)
			break;
		if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
			if (bt->width == 720 && bt->height <= 576)
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
			else
				v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 1);
		} else {
			v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
			v4l2_ctrl_s_ctrl(dev->real_rgb_range_cap, 0);
		}
		tpg_s_rgb_range(&dev->tpg, v4l2_ctrl_g_ctrl(dev->rgb_range_cap));
		break;
	}
	vfree(dev->bitmap_cap);
	dev->bitmap_cap = NULL;
	vivid_update_quality(dev);
	tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap);
	dev->crop_cap = dev->src_rect;
	dev->crop_bounds_cap = dev->src_rect;
	dev->compose_cap = dev->crop_cap;
	if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap))
		dev->compose_cap.height /= 2;
	dev->fmt_cap_rect = dev->compose_cap;
	tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev));
	tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev));
	tpg_update_mv_step(&dev->tpg);
}

/* Map the field to something that is valid for the current input */
static enum v4l2_field vivid_field_cap(struct vivid_dev *dev, enum v4l2_field field)
{
	if (vivid_is_sdtv_cap(dev)) {
		switch (field) {
		case V4L2_FIELD_INTERLACED_TB:
		case V4L2_FIELD_INTERLACED_BT:
		case V4L2_FIELD_SEQ_TB:
		case V4L2_FIELD_SEQ_BT:
		case V4L2_FIELD_TOP:
		case V4L2_FIELD_BOTTOM:
		case V4L2_FIELD_ALTERNATE:
			return field;
		case V4L2_FIELD_INTERLACED:
		default:
			return V4L2_FIELD_INTERLACED;
		}
	}
	if (vivid_is_hdmi_cap(dev))
		return dev->dv_timings_cap.bt.interlaced ? V4L2_FIELD_ALTERNATE :
			V4L2_FIELD_NONE;
	return V4L2_FIELD_NONE;
}
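/*
 * Colorimetry helpers: when video loopback is active (and the input is
 * neither a webcam nor a TV tuner) the capture colorimetry follows the
 * current output settings; otherwise it comes from the test pattern
 * generator.
 */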
static unsigned vivid_colorspace_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_colorspace(&dev->tpg);
	return dev->colorspace_out;
}

static unsigned vivid_xfer_func_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_xfer_func(&dev->tpg);
	return dev->xfer_func_out;
}

static unsigned vivid_ycbcr_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_ycbcr_enc(&dev->tpg);
	return dev->ycbcr_enc_out;
}

static unsigned int vivid_hsv_enc_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_hsv_enc(&dev->tpg);
	return dev->hsv_enc_out;
}

static unsigned vivid_quantization_cap(struct vivid_dev *dev)
{
	if (!dev->loop_video || vivid_is_webcam(dev) || vivid_is_tv_cap(dev))
		return tpg_g_quantization(&dev->tpg);
	return dev->quantization_out;
}

int vivid_g_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	unsigned p;

	mp->width = dev->fmt_cap_rect.width;
	mp->height = dev->fmt_cap_rect.height;
	mp->field = dev->field_cap;
	mp->pixelformat = dev->fmt_cap->fourcc;
	mp->colorspace = vivid_colorspace_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	if (dev->fmt_cap->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	mp->num_planes = dev->fmt_cap->buffers;
	for (p = 0; p < mp->num_planes; p++) {
		mp->plane_fmt[p].bytesperline = tpg_g_bytesperline(&dev->tpg, p);
		mp->plane_fmt[p].sizeimage =
			tpg_g_line_width(&dev->tpg, p) * mp->height +
			dev->fmt_cap->data_offset[p];
	}
	return 0;
}

int vivid_try_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct v4l2_plane_pix_format *pfmt = mp->plane_fmt;
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	unsigned bytesperline, max_bpl;
	unsigned factor = 1;
	unsigned w, h;
	unsigned p;

	fmt = vivid_get_format(dev, mp->pixelformat);
	if (!fmt) {
		dprintk(dev, 1, "Fourcc format (0x%08x) unknown.\n",
			mp->pixelformat);
		mp->pixelformat = V4L2_PIX_FMT_YUYV;
		fmt = vivid_get_format(dev, mp->pixelformat);
	}

	mp->field = vivid_field_cap(dev, mp->field);
	if (vivid_is_webcam(dev)) {
		const struct v4l2_frmsize_discrete *sz =
			v4l2_find_nearest_format(&webcam_probe, mp->width, mp->height);

		w = sz->width;
		h = sz->height;
	} else if (vivid_is_sdtv_cap(dev)) {
		w = 720;
		h = (dev->std_cap & V4L2_STD_525_60) ? 480 : 576;
	} else {
		w = dev->src_rect.width;
		h = dev->src_rect.height;
	}
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;
	if (vivid_is_webcam(dev) ||
	    (!dev->has_scaler_cap && !dev->has_crop_cap && !dev->has_compose_cap)) {
		mp->width = w;
		mp->height = h / factor;
	} else {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height * factor };

		v4l2_rect_set_min_size(&r, &vivid_min_rect);
		v4l2_rect_set_max_size(&r, &vivid_max_rect);
		if (dev->has_scaler_cap && !dev->has_compose_cap) {
			struct v4l2_rect max_r = { 0, 0, MAX_ZOOM * w, MAX_ZOOM * h };

			v4l2_rect_set_max_size(&r, &max_r);
		} else if (!dev->has_scaler_cap && dev->has_crop_cap && !dev->has_compose_cap) {
			v4l2_rect_set_max_size(&r, &dev->src_rect);
		} else if (!dev->has_scaler_cap && !dev->has_crop_cap) {
			v4l2_rect_set_min_size(&r, &dev->src_rect);
		}
		mp->width = r.width;
		mp->height = r.height / factor;
	}

	/* This driver supports custom bytesperline values */

	mp->num_planes = fmt->buffers;
	for (p = 0; p < fmt->buffers; p++) {
		/* Calculate the minimum supported bytesperline value */
		bytesperline = (mp->width * fmt->bit_depth[p]) >> 3;
		/* Calculate the maximum supported bytesperline value */
		max_bpl = (MAX_ZOOM * MAX_WIDTH * fmt->bit_depth[p]) >> 3;

		if (pfmt[p].bytesperline > max_bpl)
			pfmt[p].bytesperline = max_bpl;
		if (pfmt[p].bytesperline < bytesperline)
			pfmt[p].bytesperline = bytesperline;

		pfmt[p].sizeimage = (pfmt[p].bytesperline * mp->height) /
				fmt->vdownsampling[p] + fmt->data_offset[p];

		memset(pfmt[p].reserved, 0, sizeof(pfmt[p].reserved));
	}
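	/*
	 * For formats with more planes than buffers the remaining planes are
	 * stored in the first buffer, so add their size to plane 0's
	 * sizeimage.
	 */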
	for (p = fmt->buffers; p < fmt->planes; p++)
		pfmt[0].sizeimage += (pfmt[0].bytesperline * mp->height *
			(fmt->bit_depth[p] / fmt->vdownsampling[p])) /
			(fmt->bit_depth[0] / fmt->vdownsampling[0]);

	mp->colorspace = vivid_colorspace_cap(dev);
	if (fmt->color_enc == TGP_COLOR_ENC_HSV)
		mp->hsv_enc = vivid_hsv_enc_cap(dev);
	else
		mp->ycbcr_enc = vivid_ycbcr_enc_cap(dev);
	mp->xfer_func = vivid_xfer_func_cap(dev);
	mp->quantization = vivid_quantization_cap(dev);
	memset(mp->reserved, 0, sizeof(mp->reserved));
	return 0;
}

int vivid_s_fmt_vid_cap(struct file *file, void *priv,
			struct v4l2_format *f)
{
	struct v4l2_pix_format_mplane *mp = &f->fmt.pix_mp;
	struct vivid_dev *dev = video_drvdata(file);
	struct v4l2_rect *crop = &dev->crop_cap;
	struct v4l2_rect *compose = &dev->compose_cap;
	struct vb2_queue *q = &dev->vb_vid_cap_q;
	int ret = vivid_try_fmt_vid_cap(file, priv, f);
	unsigned factor = 1;
	unsigned p;
	unsigned i;

	if (ret < 0)
		return ret;

	if (vb2_is_busy(q)) {
		dprintk(dev, 1, "%s device busy\n", __func__);
		return -EBUSY;
	}

	if (dev->overlay_cap_owner && dev->fb_cap.fmt.pixelformat != mp->pixelformat) {
		dprintk(dev, 1, "overlay is active, can't change pixelformat\n");
		return -EBUSY;
	}

	dev->fmt_cap = vivid_get_format(dev, mp->pixelformat);
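	/*
	 * For TOP/BOTTOM/ALTERNATE fields the format height covers a single
	 * field, while the crop rectangle uses frame lines, so heights are
	 * scaled by 2 below.
	 */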
	if (V4L2_FIELD_HAS_T_OR_B(mp->field))
		factor = 2;

	/* Note: the webcam input doesn't support scaling, cropping or composing */

	if (!vivid_is_webcam(dev) &&
	    (dev->has_scaler_cap || dev->has_crop_cap || dev->has_compose_cap)) {
		struct v4l2_rect r = { 0, 0, mp->width, mp->height };

		if (dev->has_scaler_cap) {
			if (dev->has_compose_cap)
				v4l2_rect_map_inside(compose, &r);
			else
				*compose = r;
			if (dev->has_crop_cap && !dev->has_compose_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					r.width / MAX_ZOOM,
					factor * r.height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					r.width * MAX_ZOOM,
					factor * r.height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			} else if (dev->has_crop_cap) {
				struct v4l2_rect min_r = {
					0, 0,
					compose->width / MAX_ZOOM,
					factor * compose->height / MAX_ZOOM
				};
				struct v4l2_rect max_r = {
					0, 0,
					compose->width * MAX_ZOOM,
					factor * compose->height * MAX_ZOOM
				};

				v4l2_rect_set_min_size(crop, &min_r);
				v4l2_rect_set_max_size(crop, &max_r);
				v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			}
		} else if (dev->has_crop_cap && !dev->has_compose_cap) {
			r.height *= factor;
			v4l2_rect_set_size_to(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			r = *crop;
			r.height /= factor;
			v4l2_rect_set_size_to(compose, &r);
		} else if (!dev->has_crop_cap) {
			v4l2_rect_map_inside(compose, &r);
		} else {
			r.height *= factor;
			v4l2_rect_set_max_size(crop, &r);
			v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
			compose->top *= factor;
			compose->height *= factor;
			v4l2_rect_set_size_to(compose, crop);
			v4l2_rect_map_inside(compose, &r);
			compose->top /= factor;
			compose->height /= factor;
		}
	} else if (vivid_is_webcam(dev)) {
		/* Guaranteed to be a match */
		for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
			if (webcam_sizes[i].width == mp->width &&
					webcam_sizes[i].height == mp->height)
				break;
		dev->webcam_size_idx = i;
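		/*
		 * Larger webcam sizes support fewer frame intervals, so clamp
		 * the interval index to the range valid for the new size.
		 */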
  669. if (dev->webcam_ival_idx >= 2 * (VIVID_WEBCAM_SIZES - i))
  670. dev->webcam_ival_idx = 2 * (VIVID_WEBCAM_SIZES - i) - 1;
  671. vivid_update_format_cap(dev, false);
  672. } else {
  673. struct v4l2_rect r = { 0, 0, mp->width, mp->height };
  674. v4l2_rect_set_size_to(compose, &r);
  675. r.height *= factor;
  676. v4l2_rect_set_size_to(crop, &r);
  677. }
  678. dev->fmt_cap_rect.width = mp->width;
  679. dev->fmt_cap_rect.height = mp->height;
  680. tpg_s_buf_height(&dev->tpg, mp->height);
  681. tpg_s_fourcc(&dev->tpg, dev->fmt_cap->fourcc);
  682. for (p = 0; p < tpg_g_buffers(&dev->tpg); p++)
  683. tpg_s_bytesperline(&dev->tpg, p, mp->plane_fmt[p].bytesperline);
  684. dev->field_cap = mp->field;
  685. if (dev->field_cap == V4L2_FIELD_ALTERNATE)
  686. tpg_s_field(&dev->tpg, V4L2_FIELD_TOP, true);
  687. else
  688. tpg_s_field(&dev->tpg, dev->field_cap, false);
  689. tpg_s_crop_compose(&dev->tpg, &dev->crop_cap, &dev->compose_cap);
  690. if (vivid_is_sdtv_cap(dev))
  691. dev->tv_field_cap = mp->field;
  692. tpg_update_mv_step(&dev->tpg);
  693. return 0;
  694. }
  695. int vidioc_g_fmt_vid_cap_mplane(struct file *file, void *priv,
  696. struct v4l2_format *f)
  697. {
  698. struct vivid_dev *dev = video_drvdata(file);
  699. if (!dev->multiplanar)
  700. return -ENOTTY;
  701. return vivid_g_fmt_vid_cap(file, priv, f);
  702. }
  703. int vidioc_try_fmt_vid_cap_mplane(struct file *file, void *priv,
  704. struct v4l2_format *f)
  705. {
  706. struct vivid_dev *dev = video_drvdata(file);
  707. if (!dev->multiplanar)
  708. return -ENOTTY;
  709. return vivid_try_fmt_vid_cap(file, priv, f);
  710. }
  711. int vidioc_s_fmt_vid_cap_mplane(struct file *file, void *priv,
  712. struct v4l2_format *f)
  713. {
  714. struct vivid_dev *dev = video_drvdata(file);
  715. if (!dev->multiplanar)
  716. return -ENOTTY;
  717. return vivid_s_fmt_vid_cap(file, priv, f);
  718. }
  719. int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
  720. struct v4l2_format *f)
  721. {
  722. struct vivid_dev *dev = video_drvdata(file);
  723. if (dev->multiplanar)
  724. return -ENOTTY;
  725. return fmt_sp2mp_func(file, priv, f, vivid_g_fmt_vid_cap);
  726. }
  727. int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
  728. struct v4l2_format *f)
  729. {
  730. struct vivid_dev *dev = video_drvdata(file);
  731. if (dev->multiplanar)
  732. return -ENOTTY;
  733. return fmt_sp2mp_func(file, priv, f, vivid_try_fmt_vid_cap);
  734. }
  735. int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
  736. struct v4l2_format *f)
  737. {
  738. struct vivid_dev *dev = video_drvdata(file);
  739. if (dev->multiplanar)
  740. return -ENOTTY;
  741. return fmt_sp2mp_func(file, priv, f, vivid_s_fmt_vid_cap);
  742. }
  743. int vivid_vid_cap_g_selection(struct file *file, void *priv,
  744. struct v4l2_selection *sel)
  745. {
  746. struct vivid_dev *dev = video_drvdata(file);
  747. if (!dev->has_crop_cap && !dev->has_compose_cap)
  748. return -ENOTTY;
  749. if (sel->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
  750. return -EINVAL;
  751. if (vivid_is_webcam(dev))
  752. return -ENODATA;
  753. sel->r.left = sel->r.top = 0;
  754. switch (sel->target) {
  755. case V4L2_SEL_TGT_CROP:
  756. if (!dev->has_crop_cap)
  757. return -EINVAL;
  758. sel->r = dev->crop_cap;
  759. break;
  760. case V4L2_SEL_TGT_CROP_DEFAULT:
  761. case V4L2_SEL_TGT_CROP_BOUNDS:
  762. if (!dev->has_crop_cap)
  763. return -EINVAL;
  764. sel->r = dev->src_rect;
  765. break;
  766. case V4L2_SEL_TGT_COMPOSE_BOUNDS:
  767. if (!dev->has_compose_cap)
  768. return -EINVAL;
  769. sel->r = vivid_max_rect;
  770. break;
  771. case V4L2_SEL_TGT_COMPOSE:
  772. if (!dev->has_compose_cap)
  773. return -EINVAL;
  774. sel->r = dev->compose_cap;
  775. break;
  776. case V4L2_SEL_TGT_COMPOSE_DEFAULT:
  777. if (!dev->has_compose_cap)
  778. return -EINVAL;
  779. sel->r = dev->fmt_cap_rect;
  780. break;
  781. default:
  782. return -EINVAL;
  783. }
  784. return 0;
  785. }
  786. int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s)
  787. {
  788. struct vivid_dev *dev = video_drvdata(file);
  789. struct v4l2_rect *crop = &dev->crop_cap;
  790. struct v4l2_rect *compose = &dev->compose_cap;
  791. unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
  792. int ret;
  793. if (!dev->has_crop_cap && !dev->has_compose_cap)
  794. return -ENOTTY;
  795. if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
  796. return -EINVAL;
  797. if (vivid_is_webcam(dev))
  798. return -ENODATA;
  799. switch (s->target) {
  800. case V4L2_SEL_TGT_CROP:
  801. if (!dev->has_crop_cap)
  802. return -EINVAL;
  803. ret = vivid_vid_adjust_sel(s->flags, &s->r);
  804. if (ret)
  805. return ret;
  806. v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
  807. v4l2_rect_set_max_size(&s->r, &dev->src_rect);
  808. v4l2_rect_map_inside(&s->r, &dev->crop_bounds_cap);
  809. s->r.top /= factor;
  810. s->r.height /= factor;
  811. if (dev->has_scaler_cap) {
  812. struct v4l2_rect fmt = dev->fmt_cap_rect;
  813. struct v4l2_rect max_rect = {
  814. 0, 0,
  815. s->r.width * MAX_ZOOM,
  816. s->r.height * MAX_ZOOM
  817. };
  818. struct v4l2_rect min_rect = {
  819. 0, 0,
  820. s->r.width / MAX_ZOOM,
  821. s->r.height / MAX_ZOOM
  822. };
  823. v4l2_rect_set_min_size(&fmt, &min_rect);
  824. if (!dev->has_compose_cap)
  825. v4l2_rect_set_max_size(&fmt, &max_rect);
  826. if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
  827. vb2_is_busy(&dev->vb_vid_cap_q))
  828. return -EBUSY;
  829. if (dev->has_compose_cap) {
  830. v4l2_rect_set_min_size(compose, &min_rect);
  831. v4l2_rect_set_max_size(compose, &max_rect);
  832. }
  833. dev->fmt_cap_rect = fmt;
  834. tpg_s_buf_height(&dev->tpg, fmt.height);
  835. } else if (dev->has_compose_cap) {
  836. struct v4l2_rect fmt = dev->fmt_cap_rect;
  837. v4l2_rect_set_min_size(&fmt, &s->r);
  838. if (!v4l2_rect_same_size(&dev->fmt_cap_rect, &fmt) &&
  839. vb2_is_busy(&dev->vb_vid_cap_q))
  840. return -EBUSY;
  841. dev->fmt_cap_rect = fmt;
  842. tpg_s_buf_height(&dev->tpg, fmt.height);
  843. v4l2_rect_set_size_to(compose, &s->r);
  844. v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
  845. } else {
  846. if (!v4l2_rect_same_size(&s->r, &dev->fmt_cap_rect) &&
  847. vb2_is_busy(&dev->vb_vid_cap_q))
  848. return -EBUSY;
  849. v4l2_rect_set_size_to(&dev->fmt_cap_rect, &s->r);
  850. v4l2_rect_set_size_to(compose, &s->r);
  851. v4l2_rect_map_inside(compose, &dev->fmt_cap_rect);
  852. tpg_s_buf_height(&dev->tpg, dev->fmt_cap_rect.height);
  853. }
  854. s->r.top *= factor;
  855. s->r.height *= factor;
  856. *crop = s->r;
  857. break;
  858. case V4L2_SEL_TGT_COMPOSE:
  859. if (!dev->has_compose_cap)
  860. return -EINVAL;
  861. ret = vivid_vid_adjust_sel(s->flags, &s->r);
  862. if (ret)
  863. return ret;
  864. v4l2_rect_set_min_size(&s->r, &vivid_min_rect);
  865. v4l2_rect_set_max_size(&s->r, &dev->fmt_cap_rect);
  866. if (dev->has_scaler_cap) {
  867. struct v4l2_rect max_rect = {
  868. 0, 0,
  869. dev->src_rect.width * MAX_ZOOM,
  870. (dev->src_rect.height / factor) * MAX_ZOOM
  871. };
  872. v4l2_rect_set_max_size(&s->r, &max_rect);
  873. if (dev->has_crop_cap) {
  874. struct v4l2_rect min_rect = {
  875. 0, 0,
  876. s->r.width / MAX_ZOOM,
  877. (s->r.height * factor) / MAX_ZOOM
  878. };
  879. struct v4l2_rect max_rect = {
  880. 0, 0,
  881. s->r.width * MAX_ZOOM,
  882. (s->r.height * factor) * MAX_ZOOM
  883. };
  884. v4l2_rect_set_min_size(crop, &min_rect);
  885. v4l2_rect_set_max_size(crop, &max_rect);
  886. v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
  887. }
  888. } else if (dev->has_crop_cap) {
  889. s->r.top *= factor;
  890. s->r.height *= factor;
  891. v4l2_rect_set_max_size(&s->r, &dev->src_rect);
  892. v4l2_rect_set_size_to(crop, &s->r);
  893. v4l2_rect_map_inside(crop, &dev->crop_bounds_cap);
  894. s->r.top /= factor;
  895. s->r.height /= factor;
  896. } else {
  897. v4l2_rect_set_size_to(&s->r, &dev->src_rect);
  898. s->r.height /= factor;
  899. }
  900. v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect);
  901. if (dev->bitmap_cap && (compose->width != s->r.width ||
  902. compose->height != s->r.height)) {
  903. vfree(dev->bitmap_cap);
  904. dev->bitmap_cap = NULL;
  905. }
  906. *compose = s->r;
  907. break;
  908. default:
  909. return -EINVAL;
  910. }
  911. tpg_s_crop_compose(&dev->tpg, crop, compose);
  912. return 0;
  913. }
  914. int vivid_vid_cap_cropcap(struct file *file, void *priv,
  915. struct v4l2_cropcap *cap)
  916. {
  917. struct vivid_dev *dev = video_drvdata(file);
  918. if (cap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
  919. return -EINVAL;
  920. switch (vivid_get_pixel_aspect(dev)) {
  921. case TPG_PIXEL_ASPECT_NTSC:
  922. cap->pixelaspect.numerator = 11;
  923. cap->pixelaspect.denominator = 10;
  924. break;
  925. case TPG_PIXEL_ASPECT_PAL:
  926. cap->pixelaspect.numerator = 54;
  927. cap->pixelaspect.denominator = 59;
  928. break;
  929. case TPG_PIXEL_ASPECT_SQUARE:
  930. cap->pixelaspect.numerator = 1;
  931. cap->pixelaspect.denominator = 1;
  932. break;
  933. }
  934. return 0;
  935. }
  936. int vidioc_enum_fmt_vid_overlay(struct file *file, void *priv,
  937. struct v4l2_fmtdesc *f)
  938. {
  939. struct vivid_dev *dev = video_drvdata(file);
  940. const struct vivid_fmt *fmt;
  941. if (dev->multiplanar)
  942. return -ENOTTY;
  943. if (f->index >= ARRAY_SIZE(formats_ovl))
  944. return -EINVAL;
  945. fmt = &formats_ovl[f->index];
  946. f->pixelformat = fmt->fourcc;
  947. return 0;
  948. }
  949. int vidioc_g_fmt_vid_overlay(struct file *file, void *priv,
  950. struct v4l2_format *f)
  951. {
  952. struct vivid_dev *dev = video_drvdata(file);
  953. const struct v4l2_rect *compose = &dev->compose_cap;
  954. struct v4l2_window *win = &f->fmt.win;
  955. unsigned clipcount = win->clipcount;
  956. if (dev->multiplanar)
  957. return -ENOTTY;
  958. win->w.top = dev->overlay_cap_top;
  959. win->w.left = dev->overlay_cap_left;
  960. win->w.width = compose->width;
  961. win->w.height = compose->height;
  962. win->field = dev->overlay_cap_field;
  963. win->clipcount = dev->clipcount_cap;
  964. if (clipcount > dev->clipcount_cap)
  965. clipcount = dev->clipcount_cap;
  966. if (dev->bitmap_cap == NULL)
  967. win->bitmap = NULL;
  968. else if (win->bitmap) {
  969. if (copy_to_user(win->bitmap, dev->bitmap_cap,
  970. ((compose->width + 7) / 8) * compose->height))
  971. return -EFAULT;
  972. }
  973. if (clipcount && win->clips) {
  974. if (copy_to_user(win->clips, dev->clips_cap,
  975. clipcount * sizeof(dev->clips_cap[0])))
  976. return -EFAULT;
  977. }
  978. return 0;
  979. }
  980. int vidioc_try_fmt_vid_overlay(struct file *file, void *priv,
  981. struct v4l2_format *f)
  982. {
  983. struct vivid_dev *dev = video_drvdata(file);
  984. const struct v4l2_rect *compose = &dev->compose_cap;
  985. struct v4l2_window *win = &f->fmt.win;
  986. int i, j;
  987. if (dev->multiplanar)
  988. return -ENOTTY;
  989. win->w.left = clamp_t(int, win->w.left,
  990. -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
  991. win->w.top = clamp_t(int, win->w.top,
  992. -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
  993. win->w.width = compose->width;
  994. win->w.height = compose->height;
  995. if (win->field != V4L2_FIELD_BOTTOM && win->field != V4L2_FIELD_TOP)
  996. win->field = V4L2_FIELD_ANY;
  997. win->chromakey = 0;
  998. win->global_alpha = 0;
  999. if (win->clipcount && !win->clips)
  1000. win->clipcount = 0;
  1001. if (win->clipcount > MAX_CLIPS)
  1002. win->clipcount = MAX_CLIPS;
  1003. if (win->clipcount) {
  1004. if (copy_from_user(dev->try_clips_cap, win->clips,
  1005. win->clipcount * sizeof(dev->clips_cap[0])))
  1006. return -EFAULT;
  1007. for (i = 0; i < win->clipcount; i++) {
  1008. struct v4l2_rect *r = &dev->try_clips_cap[i].c;
  1009. r->top = clamp_t(s32, r->top, 0, dev->fb_cap.fmt.height - 1);
  1010. r->height = clamp_t(s32, r->height, 1, dev->fb_cap.fmt.height - r->top);
  1011. r->left = clamp_t(u32, r->left, 0, dev->fb_cap.fmt.width - 1);
  1012. r->width = clamp_t(u32, r->width, 1, dev->fb_cap.fmt.width - r->left);
  1013. }
  1014. /*
  1015. * Yeah, so sue me, it's an O(n^2) algorithm. But n is a small
  1016. * number and it's typically a one-time deal.
  1017. */
  1018. for (i = 0; i < win->clipcount - 1; i++) {
  1019. struct v4l2_rect *r1 = &dev->try_clips_cap[i].c;
  1020. for (j = i + 1; j < win->clipcount; j++) {
  1021. struct v4l2_rect *r2 = &dev->try_clips_cap[j].c;
  1022. if (v4l2_rect_overlap(r1, r2))
  1023. return -EINVAL;
  1024. }
  1025. }
  1026. if (copy_to_user(win->clips, dev->try_clips_cap,
  1027. win->clipcount * sizeof(dev->clips_cap[0])))
  1028. return -EFAULT;
  1029. }
  1030. return 0;
  1031. }
  1032. int vidioc_s_fmt_vid_overlay(struct file *file, void *priv,
  1033. struct v4l2_format *f)
  1034. {
  1035. struct vivid_dev *dev = video_drvdata(file);
  1036. const struct v4l2_rect *compose = &dev->compose_cap;
  1037. struct v4l2_window *win = &f->fmt.win;
  1038. int ret = vidioc_try_fmt_vid_overlay(file, priv, f);
  1039. unsigned bitmap_size = ((compose->width + 7) / 8) * compose->height;
  1040. unsigned clips_size = win->clipcount * sizeof(dev->clips_cap[0]);
  1041. void *new_bitmap = NULL;
  1042. if (ret)
  1043. return ret;
  1044. if (win->bitmap) {
  1045. new_bitmap = vzalloc(bitmap_size);
  1046. if (new_bitmap == NULL)
  1047. return -ENOMEM;
  1048. if (copy_from_user(new_bitmap, win->bitmap, bitmap_size)) {
  1049. vfree(new_bitmap);
  1050. return -EFAULT;
  1051. }
  1052. }
  1053. dev->overlay_cap_top = win->w.top;
  1054. dev->overlay_cap_left = win->w.left;
  1055. dev->overlay_cap_field = win->field;
  1056. vfree(dev->bitmap_cap);
  1057. dev->bitmap_cap = new_bitmap;
  1058. dev->clipcount_cap = win->clipcount;
  1059. if (dev->clipcount_cap)
  1060. memcpy(dev->clips_cap, dev->try_clips_cap, clips_size);
  1061. return 0;
  1062. }
  1063. int vivid_vid_cap_overlay(struct file *file, void *fh, unsigned i)
  1064. {
  1065. struct vivid_dev *dev = video_drvdata(file);
  1066. if (dev->multiplanar)
  1067. return -ENOTTY;
  1068. if (i && dev->fb_vbase_cap == NULL)
  1069. return -EINVAL;
  1070. if (i && dev->fb_cap.fmt.pixelformat != dev->fmt_cap->fourcc) {
  1071. dprintk(dev, 1, "mismatch between overlay and video capture pixelformats\n");
  1072. return -EINVAL;
  1073. }
  1074. if (dev->overlay_cap_owner && dev->overlay_cap_owner != fh)
  1075. return -EBUSY;
  1076. dev->overlay_cap_owner = i ? fh : NULL;
  1077. return 0;
  1078. }
  1079. int vivid_vid_cap_g_fbuf(struct file *file, void *fh,
  1080. struct v4l2_framebuffer *a)
  1081. {
  1082. struct vivid_dev *dev = video_drvdata(file);
  1083. if (dev->multiplanar)
  1084. return -ENOTTY;
  1085. *a = dev->fb_cap;
  1086. a->capability = V4L2_FBUF_CAP_BITMAP_CLIPPING |
  1087. V4L2_FBUF_CAP_LIST_CLIPPING;
  1088. a->flags = V4L2_FBUF_FLAG_PRIMARY;
  1089. a->fmt.field = V4L2_FIELD_NONE;
  1090. a->fmt.colorspace = V4L2_COLORSPACE_SRGB;
  1091. a->fmt.priv = 0;
  1092. return 0;
  1093. }
  1094. int vivid_vid_cap_s_fbuf(struct file *file, void *fh,
  1095. const struct v4l2_framebuffer *a)
  1096. {
  1097. struct vivid_dev *dev = video_drvdata(file);
  1098. const struct vivid_fmt *fmt;
  1099. if (dev->multiplanar)
  1100. return -ENOTTY;
  1101. if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RAWIO))
  1102. return -EPERM;
  1103. if (dev->overlay_cap_owner)
  1104. return -EBUSY;
  1105. if (a->base == NULL) {
  1106. dev->fb_cap.base = NULL;
  1107. dev->fb_vbase_cap = NULL;
  1108. return 0;
  1109. }
  1110. if (a->fmt.width < 48 || a->fmt.height < 32)
  1111. return -EINVAL;
  1112. fmt = vivid_get_format(dev, a->fmt.pixelformat);
  1113. if (!fmt || !fmt->can_do_overlay)
  1114. return -EINVAL;
  1115. if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8)
  1116. return -EINVAL;
  1117. if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage)
  1118. return -EINVAL;
  1119. dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base);
  1120. dev->fb_cap = *a;
  1121. dev->overlay_cap_left = clamp_t(int, dev->overlay_cap_left,
  1122. -dev->fb_cap.fmt.width, dev->fb_cap.fmt.width);
  1123. dev->overlay_cap_top = clamp_t(int, dev->overlay_cap_top,
  1124. -dev->fb_cap.fmt.height, dev->fb_cap.fmt.height);
  1125. return 0;
  1126. }
  1127. static const struct v4l2_audio vivid_audio_inputs[] = {
  1128. { 0, "TV", V4L2_AUDCAP_STEREO },
  1129. { 1, "Line-In", V4L2_AUDCAP_STEREO },
  1130. };
  1131. int vidioc_enum_input(struct file *file, void *priv,
  1132. struct v4l2_input *inp)
  1133. {
  1134. struct vivid_dev *dev = video_drvdata(file);
  1135. if (inp->index >= dev->num_inputs)
  1136. return -EINVAL;
  1137. inp->type = V4L2_INPUT_TYPE_CAMERA;
  1138. switch (dev->input_type[inp->index]) {
  1139. case WEBCAM:
  1140. snprintf(inp->name, sizeof(inp->name), "Webcam %u",
  1141. dev->input_name_counter[inp->index]);
  1142. inp->capabilities = 0;
  1143. break;
  1144. case TV:
  1145. snprintf(inp->name, sizeof(inp->name), "TV %u",
  1146. dev->input_name_counter[inp->index]);
  1147. inp->type = V4L2_INPUT_TYPE_TUNER;
  1148. inp->std = V4L2_STD_ALL;
  1149. if (dev->has_audio_inputs)
  1150. inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
  1151. inp->capabilities = V4L2_IN_CAP_STD;
  1152. break;
  1153. case SVID:
  1154. snprintf(inp->name, sizeof(inp->name), "S-Video %u",
  1155. dev->input_name_counter[inp->index]);
  1156. inp->std = V4L2_STD_ALL;
  1157. if (dev->has_audio_inputs)
  1158. inp->audioset = (1 << ARRAY_SIZE(vivid_audio_inputs)) - 1;
  1159. inp->capabilities = V4L2_IN_CAP_STD;
  1160. break;
  1161. case HDMI:
  1162. snprintf(inp->name, sizeof(inp->name), "HDMI %u",
  1163. dev->input_name_counter[inp->index]);
  1164. inp->capabilities = V4L2_IN_CAP_DV_TIMINGS;
  1165. if (dev->edid_blocks == 0 ||
  1166. dev->dv_timings_signal_mode == NO_SIGNAL)
  1167. inp->status |= V4L2_IN_ST_NO_SIGNAL;
  1168. else if (dev->dv_timings_signal_mode == NO_LOCK ||
  1169. dev->dv_timings_signal_mode == OUT_OF_RANGE)
  1170. inp->status |= V4L2_IN_ST_NO_H_LOCK;
  1171. break;
  1172. }
  1173. if (dev->sensor_hflip)
  1174. inp->status |= V4L2_IN_ST_HFLIP;
  1175. if (dev->sensor_vflip)
  1176. inp->status |= V4L2_IN_ST_VFLIP;
  1177. if (dev->input == inp->index && vivid_is_sdtv_cap(dev)) {
  1178. if (dev->std_signal_mode == NO_SIGNAL) {
  1179. inp->status |= V4L2_IN_ST_NO_SIGNAL;
  1180. } else if (dev->std_signal_mode == NO_LOCK) {
  1181. inp->status |= V4L2_IN_ST_NO_H_LOCK;
  1182. } else if (vivid_is_tv_cap(dev)) {
  1183. switch (tpg_g_quality(&dev->tpg)) {
  1184. case TPG_QUAL_GRAY:
  1185. inp->status |= V4L2_IN_ST_COLOR_KILL;
  1186. break;
  1187. case TPG_QUAL_NOISE:
  1188. inp->status |= V4L2_IN_ST_NO_H_LOCK;
  1189. break;
  1190. default:
  1191. break;
  1192. }
  1193. }
  1194. }
  1195. return 0;
  1196. }
  1197. int vidioc_g_input(struct file *file, void *priv, unsigned *i)
  1198. {
  1199. struct vivid_dev *dev = video_drvdata(file);
  1200. *i = dev->input;
  1201. return 0;
  1202. }
  1203. int vidioc_s_input(struct file *file, void *priv, unsigned i)
  1204. {
  1205. struct vivid_dev *dev = video_drvdata(file);
  1206. struct v4l2_bt_timings *bt = &dev->dv_timings_cap.bt;
  1207. unsigned brightness;
  1208. if (i >= dev->num_inputs)
  1209. return -EINVAL;
  1210. if (i == dev->input)
  1211. return 0;
  1212. if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
  1213. return -EBUSY;
  1214. dev->input = i;
  1215. dev->vid_cap_dev.tvnorms = 0;
  1216. if (dev->input_type[i] == TV || dev->input_type[i] == SVID) {
  1217. dev->tv_audio_input = (dev->input_type[i] == TV) ? 0 : 1;
  1218. dev->vid_cap_dev.tvnorms = V4L2_STD_ALL;
  1219. }
  1220. dev->vbi_cap_dev.tvnorms = dev->vid_cap_dev.tvnorms;
  1221. vivid_update_format_cap(dev, false);
  1222. if (dev->colorspace) {
  1223. switch (dev->input_type[i]) {
  1224. case WEBCAM:
  1225. v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
  1226. break;
  1227. case TV:
  1228. case SVID:
  1229. v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
  1230. break;
  1231. case HDMI:
  1232. if (bt->flags & V4L2_DV_FL_IS_CE_VIDEO) {
  1233. if (dev->src_rect.width == 720 && dev->src_rect.height <= 576)
  1234. v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_170M);
  1235. else
  1236. v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_709);
  1237. } else {
  1238. v4l2_ctrl_s_ctrl(dev->colorspace, VIVID_CS_SRGB);
  1239. }
  1240. break;
  1241. }
  1242. }
  1243. /*
  1244. * Modify the brightness range depending on the input.
  1245. * This makes it easy to use vivid to test if applications can
  1246. * handle control range modifications and is also how this is
  1247. * typically used in practice as different inputs may be hooked
  1248. * up to different receivers with different control ranges.
  1249. */
  1250. brightness = 128 * i + dev->input_brightness[i];
  1251. v4l2_ctrl_modify_range(dev->brightness,
  1252. 128 * i, 255 + 128 * i, 1, 128 + 128 * i);
  1253. v4l2_ctrl_s_ctrl(dev->brightness, brightness);
  1254. return 0;
  1255. }
  1256. int vidioc_enumaudio(struct file *file, void *fh, struct v4l2_audio *vin)
  1257. {
  1258. if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
  1259. return -EINVAL;
  1260. *vin = vivid_audio_inputs[vin->index];
  1261. return 0;
  1262. }
  1263. int vidioc_g_audio(struct file *file, void *fh, struct v4l2_audio *vin)
  1264. {
  1265. struct vivid_dev *dev = video_drvdata(file);
  1266. if (!vivid_is_sdtv_cap(dev))
  1267. return -EINVAL;
  1268. *vin = vivid_audio_inputs[dev->tv_audio_input];
  1269. return 0;
  1270. }
  1271. int vidioc_s_audio(struct file *file, void *fh, const struct v4l2_audio *vin)
  1272. {
  1273. struct vivid_dev *dev = video_drvdata(file);
  1274. if (!vivid_is_sdtv_cap(dev))
  1275. return -EINVAL;
  1276. if (vin->index >= ARRAY_SIZE(vivid_audio_inputs))
  1277. return -EINVAL;
  1278. dev->tv_audio_input = vin->index;
  1279. return 0;
  1280. }
  1281. int vivid_video_g_frequency(struct file *file, void *fh, struct v4l2_frequency *vf)
  1282. {
  1283. struct vivid_dev *dev = video_drvdata(file);
  1284. if (vf->tuner != 0)
  1285. return -EINVAL;
  1286. vf->frequency = dev->tv_freq;
  1287. return 0;
  1288. }
  1289. int vivid_video_s_frequency(struct file *file, void *fh, const struct v4l2_frequency *vf)
  1290. {
  1291. struct vivid_dev *dev = video_drvdata(file);
  1292. if (vf->tuner != 0)
  1293. return -EINVAL;
  1294. dev->tv_freq = clamp_t(unsigned, vf->frequency, MIN_TV_FREQ, MAX_TV_FREQ);
  1295. if (vivid_is_tv_cap(dev))
  1296. vivid_update_quality(dev);
  1297. return 0;
  1298. }
  1299. int vivid_video_s_tuner(struct file *file, void *fh, const struct v4l2_tuner *vt)
  1300. {
  1301. struct vivid_dev *dev = video_drvdata(file);
  1302. if (vt->index != 0)
  1303. return -EINVAL;
  1304. if (vt->audmode > V4L2_TUNER_MODE_LANG1_LANG2)
  1305. return -EINVAL;
  1306. dev->tv_audmode = vt->audmode;
  1307. return 0;
  1308. }
  1309. int vivid_video_g_tuner(struct file *file, void *fh, struct v4l2_tuner *vt)
  1310. {
  1311. struct vivid_dev *dev = video_drvdata(file);
  1312. enum tpg_quality qual;
  1313. if (vt->index != 0)
  1314. return -EINVAL;
  1315. vt->capability = V4L2_TUNER_CAP_NORM | V4L2_TUNER_CAP_STEREO |
  1316. V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2;
  1317. vt->audmode = dev->tv_audmode;
  1318. vt->rangelow = MIN_TV_FREQ;
  1319. vt->rangehigh = MAX_TV_FREQ;
  1320. qual = vivid_get_quality(dev, &vt->afc);
  1321. if (qual == TPG_QUAL_COLOR)
  1322. vt->signal = 0xffff;
  1323. else if (qual == TPG_QUAL_GRAY)
  1324. vt->signal = 0x8000;
  1325. else
  1326. vt->signal = 0;
  1327. if (qual == TPG_QUAL_NOISE) {
  1328. vt->rxsubchans = 0;
  1329. } else if (qual == TPG_QUAL_GRAY) {
  1330. vt->rxsubchans = V4L2_TUNER_SUB_MONO;
  1331. } else {
  1332. unsigned channel_nr = dev->tv_freq / (6 * 16);
  1333. unsigned options = (dev->std_cap & V4L2_STD_NTSC_M) ? 4 : 3;
  1334. switch (channel_nr % options) {
  1335. case 0:
  1336. vt->rxsubchans = V4L2_TUNER_SUB_MONO;
  1337. break;
  1338. case 1:
  1339. vt->rxsubchans = V4L2_TUNER_SUB_STEREO;
  1340. break;
  1341. case 2:
  1342. if (dev->std_cap & V4L2_STD_NTSC_M)
  1343. vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_SAP;
  1344. else
  1345. vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2;
  1346. break;
  1347. case 3:
  1348. vt->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_SAP;
  1349. break;
  1350. }
  1351. }
  1352. strlcpy(vt->name, "TV Tuner", sizeof(vt->name));
  1353. return 0;
  1354. }
/* Must remain in sync with the vivid_ctrl_standard_strings array */
const v4l2_std_id vivid_standard[] = {
	V4L2_STD_NTSC_M,
	V4L2_STD_NTSC_M_JP,
	V4L2_STD_NTSC_M_KR,
	V4L2_STD_NTSC_443,
	V4L2_STD_PAL_BG | V4L2_STD_PAL_H,
	V4L2_STD_PAL_I,
	V4L2_STD_PAL_DK,
	V4L2_STD_PAL_M,
	V4L2_STD_PAL_N,
	V4L2_STD_PAL_Nc,
	V4L2_STD_PAL_60,
	V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H,
	V4L2_STD_SECAM_DK,
	V4L2_STD_SECAM_L,
	V4L2_STD_SECAM_LC,
	V4L2_STD_UNKNOWN
};

/* Must remain in sync with the vivid_standard array */
const char * const vivid_ctrl_standard_strings[] = {
	"NTSC-M",
	"NTSC-M-JP",
	"NTSC-M-KR",
	"NTSC-443",
	"PAL-BGH",
	"PAL-I",
	"PAL-DK",
	"PAL-M",
	"PAL-N",
	"PAL-Nc",
	"PAL-60",
	"SECAM-BGH",
	"SECAM-DK",
	"SECAM-L",
	"SECAM-Lc",
	NULL,
};
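
/*
 * QUERYSTD: the reported standard depends on dev->std_signal_mode.
 * No signal or no lock yields V4L2_STD_UNKNOWN (as does a pure noise
 * pattern on TV inputs), CURRENT_STD reports the currently configured
 * standard, SELECTED_STD reports dev->query_std, and the remaining
 * mode cycles through the vivid_standard table on every call.
 */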
int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_signal_mode == NO_SIGNAL ||
	    dev->std_signal_mode == NO_LOCK) {
		*id = V4L2_STD_UNKNOWN;
		return 0;
	}
	if (vivid_is_tv_cap(dev) && tpg_g_quality(&dev->tpg) == TPG_QUAL_NOISE) {
		*id = V4L2_STD_UNKNOWN;
	} else if (dev->std_signal_mode == CURRENT_STD) {
		*id = dev->std_cap;
	} else if (dev->std_signal_mode == SELECTED_STD) {
		*id = dev->query_std;
	} else {
		*id = vivid_standard[dev->query_std_last];
		dev->query_std_last = (dev->query_std_last + 1) % ARRAY_SIZE(vivid_standard);
	}
	return 0;
}
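
/*
 * Switching the TV standard is only allowed while both the video and
 * VBI capture queues are idle, since it changes the capture format.
 */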
int vivid_vid_cap_s_std(struct file *file, void *priv, v4l2_std_id id)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_sdtv_cap(dev))
		return -ENODATA;
	if (dev->std_cap == id)
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q) || vb2_is_busy(&dev->vb_vbi_cap_q))
		return -EBUSY;
	dev->std_cap = id;
	vivid_update_format_cap(dev, false);
	return 0;
}
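
/*
 * Map a width/height pair onto one of the common picture aspect ratios
 * (e.g. 1280x720 -> 16:9, 1280x1024 -> 5:4); anything unrecognized
 * falls back to 16:9. The result feeds the GTF timings detection below.
 */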
static void find_aspect_ratio(u32 width, u32 height,
			      u32 *num, u32 *denom)
{
	if (!(height % 3) && ((height * 4 / 3) == width)) {
		*num = 4;
		*denom = 3;
	} else if (!(height % 9) && ((height * 16 / 9) == width)) {
		*num = 16;
		*denom = 9;
	} else if (!(height % 10) && ((height * 16 / 10) == width)) {
		*num = 16;
		*denom = 10;
	} else if (!(height % 4) && ((height * 5 / 4) == width)) {
		*num = 5;
		*denom = 4;
	} else if (!(height % 9) && ((height * 15 / 9) == width)) {
		*num = 15;
		*denom = 9;
	} else { /* default to 16:9 */
		*num = 16;
		*denom = 9;
	}
}
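
/*
 * Check whether non-standard timings look like valid CVT or GTF timings.
 * The horizontal frequency is derived from the pixel clock divided by
 * the total line length (active width plus blanking), and the timings
 * are then run through the CVT and GTF detectors, honouring any
 * standards hint supplied by the caller.
 */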
static bool valid_cvt_gtf_timings(struct v4l2_dv_timings *timings)
{
	struct v4l2_bt_timings *bt = &timings->bt;
	u32 total_h_pixel;
	u32 total_v_lines;
	u32 h_freq;

	if (!v4l2_valid_dv_timings(timings, &vivid_dv_timings_cap,
				   NULL, NULL))
		return false;

	total_h_pixel = V4L2_DV_BT_FRAME_WIDTH(bt);
	total_v_lines = V4L2_DV_BT_FRAME_HEIGHT(bt);

	h_freq = (u32)bt->pixelclock / total_h_pixel;

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_CVT)) {
		if (v4l2_detect_cvt(total_v_lines, h_freq, bt->vsync, bt->width,
				    bt->polarities, bt->interlaced, timings))
			return true;
	}

	if (bt->standards == 0 || (bt->standards & V4L2_DV_BT_STD_GTF)) {
		struct v4l2_fract aspect_ratio;

		find_aspect_ratio(bt->width, bt->height,
				  &aspect_ratio.numerator,
				  &aspect_ratio.denominator);
		if (v4l2_detect_gtf(total_v_lines, h_freq, bt->vsync,
				    bt->polarities, bt->interlaced,
				    aspect_ratio, timings))
			return true;
	}
	return false;
}
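
/*
 * New DV timings are accepted if they are found in the capabilities
 * table or pass the CVT/GTF check above. Setting identical timings is
 * a no-op, and the switch is refused with -EBUSY while video capture
 * is streaming.
 */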
int vivid_vid_cap_s_dv_timings(struct file *file, void *_fh,
			       struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (!v4l2_find_dv_timings_cap(timings, &vivid_dv_timings_cap,
				      0, NULL, NULL) &&
	    !valid_cvt_gtf_timings(timings))
		return -EINVAL;
	if (v4l2_match_dv_timings(timings, &dev->dv_timings_cap, 0, false))
		return 0;
	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;
	dev->dv_timings_cap = *timings;
	vivid_update_format_cap(dev, false);
	return 0;
}
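
/*
 * QUERY_DV_TIMINGS mirrors dev->dv_timings_signal_mode: no signal or a
 * cleared EDID reports -ENOLINK, no lock reports -ENOLCK, out-of-range
 * reports -ERANGE with an impossibly high pixel clock, and otherwise
 * either the current timings, a selected preset, or a preset that
 * cycles on every call is returned.
 */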
int vidioc_query_dv_timings(struct file *file, void *_fh,
			    struct v4l2_dv_timings *timings)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_hdmi_cap(dev))
		return -ENODATA;
	if (dev->dv_timings_signal_mode == NO_SIGNAL ||
	    dev->edid_blocks == 0)
		return -ENOLINK;
	if (dev->dv_timings_signal_mode == NO_LOCK)
		return -ENOLCK;
	if (dev->dv_timings_signal_mode == OUT_OF_RANGE) {
		timings->bt.pixelclock = vivid_dv_timings_cap.bt.max_pixelclock * 2;
		return -ERANGE;
	}
	if (dev->dv_timings_signal_mode == CURRENT_DV_TIMINGS) {
		*timings = dev->dv_timings_cap;
	} else if (dev->dv_timings_signal_mode == SELECTED_DV_TIMINGS) {
		*timings = v4l2_dv_timings_presets[dev->query_dv_timings];
	} else {
		*timings = v4l2_dv_timings_presets[dev->query_dv_timings_last];
		dev->query_dv_timings_last = (dev->query_dv_timings_last + 1) %
					     dev->query_dv_timings_size;
	}
	return 0;
}
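
/*
 * Store a new EDID for an HDMI input. Clearing the EDID (blocks == 0)
 * invalidates the CEC physical address; otherwise the physical address
 * is extracted from the EDID, validated, and propagated to the CEC
 * receiver and to the per-output CEC transmitters.
 */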
int vidioc_s_edid(struct file *file, void *_fh,
		  struct v4l2_edid *edid)
{
	struct vivid_dev *dev = video_drvdata(file);
	u16 phys_addr;
	unsigned int i;
	int ret;

	memset(edid->reserved, 0, sizeof(edid->reserved));
	if (edid->pad >= dev->num_inputs)
		return -EINVAL;
	if (dev->input_type[edid->pad] != HDMI || edid->start_block)
		return -EINVAL;
	if (edid->blocks == 0) {
		dev->edid_blocks = 0;
		phys_addr = CEC_PHYS_ADDR_INVALID;
		goto set_phys_addr;
	}
	if (edid->blocks > dev->edid_max_blocks) {
		edid->blocks = dev->edid_max_blocks;
		return -E2BIG;
	}
	phys_addr = cec_get_edid_phys_addr(edid->edid, edid->blocks * 128, NULL);
	ret = cec_phys_addr_validate(phys_addr, &phys_addr, NULL);
	if (ret)
		return ret;

	if (vb2_is_busy(&dev->vb_vid_cap_q))
		return -EBUSY;

	dev->edid_blocks = edid->blocks;
	memcpy(dev->edid, edid->edid, edid->blocks * 128);

set_phys_addr:
	/* TODO: a proper hotplug detect cycle should be emulated here */
	cec_s_phys_addr(dev->cec_rx_adap, phys_addr, false);

	for (i = 0; i < MAX_OUTPUTS && dev->cec_tx_adap[i]; i++)
		cec_s_phys_addr(dev->cec_tx_adap[i],
				cec_phys_addr_for_input(phys_addr, i + 1),
				false);
	return 0;
}
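
/*
 * Frame size enumeration: webcam devices expose the discrete entries of
 * the webcam_sizes table, while devices with a capture scaler report a
 * single stepwise range from MIN_WIDTH x MIN_HEIGHT up to the maximum
 * size times the maximum zoom factor, in steps of two pixels.
 */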
int vidioc_enum_framesizes(struct file *file, void *fh,
			   struct v4l2_frmsizeenum *fsize)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (!vivid_is_webcam(dev) && !dev->has_scaler_cap)
		return -EINVAL;
	if (vivid_get_format(dev, fsize->pixel_format) == NULL)
		return -EINVAL;
	if (vivid_is_webcam(dev)) {
		if (fsize->index >= ARRAY_SIZE(webcam_sizes))
			return -EINVAL;
		fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE;
		fsize->discrete = webcam_sizes[fsize->index];
		return 0;
	}
	if (fsize->index)
		return -EINVAL;
	fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
	fsize->stepwise.min_width = MIN_WIDTH;
	fsize->stepwise.max_width = MAX_WIDTH * MAX_ZOOM;
	fsize->stepwise.step_width = 2;
	fsize->stepwise.min_height = MIN_HEIGHT;
	fsize->stepwise.max_height = MAX_HEIGHT * MAX_ZOOM;
	fsize->stepwise.step_height = 2;
	return 0;
}

/* timeperframe is arbitrary and continuous */
int vidioc_enum_frameintervals(struct file *file, void *priv,
			       struct v4l2_frmivalenum *fival)
{
	struct vivid_dev *dev = video_drvdata(file);
	const struct vivid_fmt *fmt;
	int i;

	fmt = vivid_get_format(dev, fival->pixel_format);
	if (!fmt)
		return -EINVAL;

	if (!vivid_is_webcam(dev)) {
		if (fival->index)
			return -EINVAL;
		if (fival->width < MIN_WIDTH || fival->width > MAX_WIDTH * MAX_ZOOM)
			return -EINVAL;
		if (fival->height < MIN_HEIGHT || fival->height > MAX_HEIGHT * MAX_ZOOM)
			return -EINVAL;
		fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
		fival->discrete = dev->timeperframe_vid_cap;
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(webcam_sizes); i++)
		if (fival->width == webcam_sizes[i].width &&
		    fival->height == webcam_sizes[i].height)
			break;
	if (i == ARRAY_SIZE(webcam_sizes))
		return -EINVAL;
	if (fival->index >= 2 * (VIVID_WEBCAM_SIZES - i))
		return -EINVAL;
	fival->type = V4L2_FRMIVAL_TYPE_DISCRETE;
	fival->discrete = webcam_intervals[fival->index];
	return 0;
}
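
/*
 * G_PARM only advertises V4L2_CAP_TIMEPERFRAME and reports the current
 * capture frame period together with a single read buffer.
 */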
int vivid_vid_cap_g_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;

	parm->parm.capture.capability   = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = dev->timeperframe_vid_cap;
	parm->parm.capture.readbuffers  = 1;
	return 0;
}
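
/*
 * FRACT_CMP compares two struct v4l2_fract values by cross-multiplying
 * in 64 bits, avoiding both division and overflow: a/b OP c/d is
 * evaluated as a*d OP c*b. For example FRACT_CMP(1/30, <, 1/25) becomes
 * 1 * 25 < 1 * 30, which is true: a 1/30 s frame period is shorter than
 * a 1/25 s one. S_PARM below uses it to pick a supported webcam frame
 * interval for the requested timeperframe and to clamp the result to
 * the tpf_min/tpf_max limits.
 */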
#define FRACT_CMP(a, OP, b)	\
	((u64)(a).numerator * (b).denominator OP (u64)(b).numerator * (a).denominator)

int vivid_vid_cap_s_parm(struct file *file, void *priv,
			 struct v4l2_streamparm *parm)
{
	struct vivid_dev *dev = video_drvdata(file);
	unsigned ival_sz = 2 * (VIVID_WEBCAM_SIZES - dev->webcam_size_idx);
	struct v4l2_fract tpf;
	unsigned i;

	if (parm->type != (dev->multiplanar ?
			   V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE :
			   V4L2_BUF_TYPE_VIDEO_CAPTURE))
		return -EINVAL;
	if (!vivid_is_webcam(dev))
		return vivid_vid_cap_g_parm(file, priv, parm);

	tpf = parm->parm.capture.timeperframe;

	if (tpf.denominator == 0)
		tpf = webcam_intervals[ival_sz - 1];
	for (i = 0; i < ival_sz; i++)
		if (FRACT_CMP(tpf, >=, webcam_intervals[i]))
			break;
	if (i == ival_sz)
		i = ival_sz - 1;
	dev->webcam_ival_idx = i;
	tpf = webcam_intervals[dev->webcam_ival_idx];
	tpf = FRACT_CMP(tpf, <, tpf_min) ? tpf_min : tpf;
	tpf = FRACT_CMP(tpf, >, tpf_max) ? tpf_max : tpf;

	/* resync the thread's timings */
	dev->cap_seq_resync = true;
	dev->timeperframe_vid_cap = tpf;
	parm->parm.capture.capability   = V4L2_CAP_TIMEPERFRAME;
	parm->parm.capture.timeperframe = tpf;
	parm->parm.capture.readbuffers  = 1;
	return 0;
}