12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057 |
- // SPDX-License-Identifier: GPL-2.0
- /*
- * Copyright (C) 2017 Intel Corporation
- *
- * Based partially on Intel IPU4 driver written by
- * Sakari Ailus <sakari.ailus@linux.intel.com>
- * Samu Onkalo <samu.onkalo@intel.com>
- * Jouni Högander <jouni.hogander@intel.com>
- * Jouni Ukkonen <jouni.ukkonen@intel.com>
- * Antti Laakso <antti.laakso@intel.com>
- * et al.
- *
- */
- #include <linux/delay.h>
- #include <linux/interrupt.h>
- #include <linux/module.h>
- #include <linux/pci.h>
- #include <linux/pm_runtime.h>
- #include <linux/property.h>
- #include <linux/vmalloc.h>
- #include <media/v4l2-ctrls.h>
- #include <media/v4l2-device.h>
- #include <media/v4l2-event.h>
- #include <media/v4l2-fwnode.h>
- #include <media/v4l2-ioctl.h>
- #include <media/videobuf2-dma-sg.h>
- #include "ipu3-cio2.h"
/*
 * Describes one capture format supported by the driver: the media bus
 * code used on the subdev side, the V4L2 fourcc exposed to user space,
 * and the MIPI CSI-2 data type programmed into the receiver.
 */
struct ipu3_cio2_fmt {
	u32 mbus_code;	/* MEDIA_BUS_FMT_* code */
	u32 fourcc;	/* V4L2_PIX_FMT_* pixel format */
	u8 mipicode;	/* MIPI CSI-2 data type (0x2b for all entries below) */
};
- /*
- * These are raw formats used in Intel's third generation of
- * Image Processing Unit known as IPU3.
- * 10bit raw bayer packed, 32 bytes for every 25 pixels,
- * last LSB 6 bits unused.
- */
/* Table of supported formats; all are packed RAW10 Bayer variants */
static const struct ipu3_cio2_fmt formats[] = {
	{ /* put default entry at beginning */
		.mbus_code = MEDIA_BUS_FMT_SGRBG10_1X10,
		.fourcc = V4L2_PIX_FMT_IPU3_SGRBG10,
		.mipicode = 0x2b,
	}, {
		.mbus_code = MEDIA_BUS_FMT_SGBRG10_1X10,
		.fourcc = V4L2_PIX_FMT_IPU3_SGBRG10,
		.mipicode = 0x2b,
	}, {
		.mbus_code = MEDIA_BUS_FMT_SBGGR10_1X10,
		.fourcc = V4L2_PIX_FMT_IPU3_SBGGR10,
		.mipicode = 0x2b,
	}, {
		.mbus_code = MEDIA_BUS_FMT_SRGGB10_1X10,
		.fourcc = V4L2_PIX_FMT_IPU3_SRGGB10,
		.mipicode = 0x2b,
	},
};
- /*
- * cio2_find_format - lookup color format by fourcc or/and media bus code
- * @pixelformat: fourcc to match, ignored if null
- * @mbus_code: media bus code to match, ignored if null
- */
- static const struct ipu3_cio2_fmt *cio2_find_format(const u32 *pixelformat,
- const u32 *mbus_code)
- {
- unsigned int i;
- for (i = 0; i < ARRAY_SIZE(formats); i++) {
- if (pixelformat && *pixelformat != formats[i].fourcc)
- continue;
- if (mbus_code && *mbus_code != formats[i].mbus_code)
- continue;
- return &formats[i];
- }
- return NULL;
- }
/* Compute the line length in bytes for a packed RAW10 line of @width pixels */
static inline u32 cio2_bytesperline(const unsigned int width)
{
	/*
	 * 64 bytes for every 50 pixels, the line length
	 * in bytes is multiple of 64 (line end alignment).
	 */
	return DIV_ROUND_UP(width, 50) * 64;
}
- /**************** FBPT operations ****************/
- static void cio2_fbpt_exit_dummy(struct cio2_device *cio2)
- {
- if (cio2->dummy_lop) {
- dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
- cio2->dummy_lop, cio2->dummy_lop_bus_addr);
- cio2->dummy_lop = NULL;
- }
- if (cio2->dummy_page) {
- dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
- cio2->dummy_page, cio2->dummy_page_bus_addr);
- cio2->dummy_page = NULL;
- }
- }
- static int cio2_fbpt_init_dummy(struct cio2_device *cio2)
- {
- unsigned int i;
- cio2->dummy_page = dma_alloc_coherent(&cio2->pci_dev->dev,
- CIO2_PAGE_SIZE,
- &cio2->dummy_page_bus_addr,
- GFP_KERNEL);
- cio2->dummy_lop = dma_alloc_coherent(&cio2->pci_dev->dev,
- CIO2_PAGE_SIZE,
- &cio2->dummy_lop_bus_addr,
- GFP_KERNEL);
- if (!cio2->dummy_page || !cio2->dummy_lop) {
- cio2_fbpt_exit_dummy(cio2);
- return -ENOMEM;
- }
- /*
- * List of Pointers(LOP) contains 1024x32b pointers to 4KB page each
- * Initialize each entry to dummy_page bus base address.
- */
- for (i = 0; i < CIO2_PAGE_SIZE / sizeof(*cio2->dummy_lop); i++)
- cio2->dummy_lop[i] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
- return 0;
- }
/*
 * Hand an initialized FBPT entry over to the hardware by setting its
 * VALID bit (plus start/completion interrupt requests).  Must be called
 * last, after all other fields of @entry have been written.
 */
static void cio2_fbpt_entry_enable(struct cio2_device *cio2,
				   struct cio2_fbpt_entry entry[CIO2_MAX_LOPS])
{
	/*
	 * The CPU first initializes some fields in fbpt, then sets
	 * the VALID bit, this barrier is to ensure that the DMA(device)
	 * does not see the VALID bit enabled before other fields are
	 * initialized; otherwise it could lead to havoc.
	 */
	dma_wmb();

	/*
	 * Request interrupts for start and completion
	 * Valid bit is applicable only to 1st entry
	 */
	entry[0].first_entry.ctrl = CIO2_FBPT_CTRL_VALID |
		CIO2_FBPT_CTRL_IOC | CIO2_FBPT_CTRL_IOS;
}
- /* Initialize fpbt entries to point to dummy frame */
- static void cio2_fbpt_entry_init_dummy(struct cio2_device *cio2,
- struct cio2_fbpt_entry
- entry[CIO2_MAX_LOPS])
- {
- unsigned int i;
- entry[0].first_entry.first_page_offset = 0;
- entry[1].second_entry.num_of_pages =
- CIO2_PAGE_SIZE / sizeof(u32) * CIO2_MAX_LOPS;
- entry[1].second_entry.last_page_available_bytes = CIO2_PAGE_SIZE - 1;
- for (i = 0; i < CIO2_MAX_LOPS; i++)
- entry[i].lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;
- cio2_fbpt_entry_enable(cio2, entry);
- }
- /* Initialize fpbt entries to point to a given buffer */
/* Initialize fpbt entries to point to a given buffer */
static void cio2_fbpt_entry_init_buf(struct cio2_device *cio2,
				     struct cio2_buffer *b,
				     struct cio2_fbpt_entry
				     entry[CIO2_MAX_LOPS])
{
	struct vb2_buffer *vb = &b->vbb.vb2_buf;
	unsigned int length = vb->planes[0].length;
	int remaining, i;

	entry[0].first_entry.first_page_offset = b->offset;
	/* Page count covers the payload plus the leading intra-page offset */
	remaining = length + entry[0].first_entry.first_page_offset;
	entry[1].second_entry.num_of_pages =
		DIV_ROUND_UP(remaining, CIO2_PAGE_SIZE);
	/*
	 * last_page_available_bytes has the offset of the last byte in the
	 * last page which is still accessible by DMA. DMA cannot access
	 * beyond this point. Valid range for this is from 0 to 4095.
	 * 0 indicates 1st byte in the page is DMA accessible.
	 * 4095 (CIO2_PAGE_SIZE - 1) means every single byte in the last page
	 * is available for DMA transfer.
	 */
	entry[1].second_entry.last_page_available_bytes =
			(remaining & ~PAGE_MASK) ?
				(remaining & ~PAGE_MASK) - 1 :
				CIO2_PAGE_SIZE - 1;
	/* Fill FBPT */
	remaining = length;
	i = 0;
	/* Each LOP page maps CIO2_PAGE_SIZE/sizeof(u32) pages of payload */
	while (remaining > 0) {
		entry->lop_page_addr = b->lop_bus_addr[i] >> PAGE_SHIFT;
		remaining -= CIO2_PAGE_SIZE / sizeof(u32) * CIO2_PAGE_SIZE;
		entry++;
		i++;
	}

	/*
	 * The first not meaningful FBPT entry should point to a valid LOP
	 */
	entry->lop_page_addr = cio2->dummy_lop_bus_addr >> PAGE_SHIFT;

	cio2_fbpt_entry_enable(cio2, entry);
}
/*
 * Allocate and zero the Frame Buffer Pointer Table (FBPT) for a queue.
 * Returns 0 on success or -ENOMEM if the DMA allocation fails.
 */
static int cio2_fbpt_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	struct device *dev = &cio2->pci_dev->dev;

	q->fbpt = dma_alloc_coherent(dev, CIO2_FBPT_SIZE, &q->fbpt_bus_addr,
				     GFP_KERNEL);
	if (!q->fbpt)
		return -ENOMEM;

	memset(q->fbpt, 0, CIO2_FBPT_SIZE);

	return 0;
}
/* Free the FBPT allocated by cio2_fbpt_init() */
static void cio2_fbpt_exit(struct cio2_queue *q, struct device *dev)
{
	dma_free_coherent(dev, CIO2_FBPT_SIZE, q->fbpt, q->fbpt_bus_addr);
}
- /**************** CSI2 hardware setup ****************/
- /*
- * The CSI2 receiver has several parameters affecting
- * the receiver timings. These depend on the MIPI bus frequency
- * F in Hz (sensor transmitter rate) as follows:
- * register value = (A/1e9 + B * UI) / COUNT_ACC
- * where
- * UI = 1 / (2 * F) in seconds
- * COUNT_ACC = counter accuracy in seconds
- * For IPU3 COUNT_ACC = 0.0625
- *
- * A and B are coefficients from the table below,
- * depending whether the register minimum or maximum value is
- * calculated.
- * Minimum Maximum
- * Clock lane A B A B
- * reg_rx_csi_dly_cnt_termen_clane 0 0 38 0
- * reg_rx_csi_dly_cnt_settle_clane 95 -8 300 -16
- * Data lanes
- * reg_rx_csi_dly_cnt_termen_dlane0 0 0 35 4
- * reg_rx_csi_dly_cnt_settle_dlane0 85 -2 145 -6
- * reg_rx_csi_dly_cnt_termen_dlane1 0 0 35 4
- * reg_rx_csi_dly_cnt_settle_dlane1 85 -2 145 -6
- * reg_rx_csi_dly_cnt_termen_dlane2 0 0 35 4
- * reg_rx_csi_dly_cnt_settle_dlane2 85 -2 145 -6
- * reg_rx_csi_dly_cnt_termen_dlane3 0 0 35 4
- * reg_rx_csi_dly_cnt_settle_dlane3 85 -2 145 -6
- *
- * We use the minimum values of both A and B.
- */
- /*
- * shift for keeping value range suitable for 32-bit integer arithmetics
- */
- #define LIMIT_SHIFT 8
- static s32 cio2_rx_timing(s32 a, s32 b, s64 freq, int def)
- {
- const u32 accinv = 16; /* invert of counter resolution */
- const u32 uiinv = 500000000; /* 1e9 / 2 */
- s32 r;
- freq >>= LIMIT_SHIFT;
- if (WARN_ON(freq <= 0 || freq > S32_MAX))
- return def;
- /*
- * b could be 0, -2 or -8, so |accinv * b| is always
- * less than (1 << ds) and thus |r| < 500000000.
- */
- r = accinv * b * (uiinv >> LIMIT_SHIFT);
- r = r / (s32)freq;
- /* max value of a is 95 */
- r += accinv * a;
- return r;
- };
/* Calculate the delay value for termination enable of clock lane HS Rx */
- static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q,
- struct cio2_csi2_timing *timing)
- {
- struct device *dev = &cio2->pci_dev->dev;
- struct v4l2_querymenu qm = {.id = V4L2_CID_LINK_FREQ, };
- struct v4l2_ctrl *link_freq;
- s64 freq;
- int r;
- if (!q->sensor)
- return -ENODEV;
- link_freq = v4l2_ctrl_find(q->sensor->ctrl_handler, V4L2_CID_LINK_FREQ);
- if (!link_freq) {
- dev_err(dev, "failed to find LINK_FREQ\n");
- return -EPIPE;
- }
- qm.index = v4l2_ctrl_g_ctrl(link_freq);
- r = v4l2_querymenu(q->sensor->ctrl_handler, &qm);
- if (r) {
- dev_err(dev, "failed to get menu item\n");
- return r;
- }
- if (!qm.value) {
- dev_err(dev, "error invalid link_freq\n");
- return -EINVAL;
- }
- freq = qm.value;
- timing->clk_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_A,
- CIO2_CSIRX_DLY_CNT_TERMEN_CLANE_B,
- freq,
- CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
- timing->clk_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_A,
- CIO2_CSIRX_DLY_CNT_SETTLE_CLANE_B,
- freq,
- CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
- timing->dat_termen = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_A,
- CIO2_CSIRX_DLY_CNT_TERMEN_DLANE_B,
- freq,
- CIO2_CSIRX_DLY_CNT_TERMEN_DEFAULT);
- timing->dat_settle = cio2_rx_timing(CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_A,
- CIO2_CSIRX_DLY_CNT_SETTLE_DLANE_B,
- freq,
- CIO2_CSIRX_DLY_CNT_SETTLE_DEFAULT);
- dev_dbg(dev, "freq ct value is %d\n", timing->clk_termen);
- dev_dbg(dev, "freq cs value is %d\n", timing->clk_settle);
- dev_dbg(dev, "freq dt value is %d\n", timing->dat_termen);
- dev_dbg(dev, "freq ds value is %d\n", timing->dat_settle);
- return 0;
- };
/*
 * Program the CIO2 hardware for capture on queue @q: CSI-2 Rx timings,
 * buffer manager watermarks, MIPI backend LUTs, interrupt enables,
 * clock gating, LTR values and finally the DMA channel itself.
 * Returns 0 on success or a negative errno (bad format / timing failure).
 * NOTE(review): register write order follows the hardware bring-up
 * sequence; do not reorder without consulting the IPU3 documentation.
 */
static int cio2_hw_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const int NUM_VCS = 4;
	static const int SID;	/* Stream id */
	static const int ENTRY;
	static const int FBPT_WIDTH = DIV_ROUND_UP(CIO2_MAX_LOPS,
					CIO2_FBPT_SUBENTRY_UNIT);
	const u32 num_buffers1 = CIO2_MAX_BUFFERS - 1;
	const struct ipu3_cio2_fmt *fmt;
	void __iomem *const base = cio2->base;
	u8 lanes, csi2bus = q->csi2.port;
	u8 sensor_vc = SENSOR_VIR_CH_DFLT;
	struct cio2_csi2_timing timing;
	int i, r;

	/* Look up the MIPI data type for the configured subdev format */
	fmt = cio2_find_format(NULL, &q->subdev_fmt.code);
	if (!fmt)
		return -EINVAL;

	lanes = q->csi2.lanes;

	r = cio2_csi2_calc_timing(cio2, q, &timing);
	if (r)
		return r;

	/* Clock lane, then per-data-lane termination/settle delay counts */
	writel(timing.clk_termen, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_TERMEN(CIO2_CSIRX_DLY_CNT_CLANE_IDX));
	writel(timing.clk_settle, q->csi_rx_base +
		CIO2_REG_CSIRX_DLY_CNT_SETTLE(CIO2_CSIRX_DLY_CNT_CLANE_IDX));

	for (i = 0; i < lanes; i++) {
		writel(timing.dat_termen, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_TERMEN(i));
		writel(timing.dat_settle, q->csi_rx_base +
			CIO2_REG_CSIRX_DLY_CNT_SETTLE(i));
	}

	/* Pixel buffer manager watermarks and arbitration */
	writel(CIO2_PBM_WMCTRL1_MIN_2CK |
	       CIO2_PBM_WMCTRL1_MID1_2CK |
	       CIO2_PBM_WMCTRL1_MID2_2CK, base + CIO2_REG_PBM_WMCTRL1);
	writel(CIO2_PBM_WMCTRL2_HWM_2CK << CIO2_PBM_WMCTRL2_HWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_LWM_2CK << CIO2_PBM_WMCTRL2_LWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK <<
	       CIO2_PBM_WMCTRL2_OBFFWM_2CK_SHIFT |
	       CIO2_PBM_WMCTRL2_TRANSDYN << CIO2_PBM_WMCTRL2_TRANSDYN_SHIFT |
	       CIO2_PBM_WMCTRL2_OBFF_MEM_EN, base + CIO2_REG_PBM_WMCTRL2);
	writel(CIO2_PBM_ARB_CTRL_LANES_DIV <<
	       CIO2_PBM_ARB_CTRL_LANES_DIV_SHIFT |
	       CIO2_PBM_ARB_CTRL_LE_EN |
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN <<
	       CIO2_PBM_ARB_CTRL_PLL_POST_SHTDN_SHIFT |
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP <<
	       CIO2_PBM_ARB_CTRL_PLL_AHD_WK_UP_SHIFT,
	       base + CIO2_REG_PBM_ARB_CTRL);
	/* Clear any latched lane status before enabling */
	writel(CIO2_CSIRX_STATUS_DLANE_HS_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_HS);
	writel(CIO2_CSIRX_STATUS_DLANE_LP_MASK,
	       q->csi_rx_base + CIO2_REG_CSIRX_STATUS_DLANE_LP);

	writel(CIO2_FB_HPLL_FREQ, base + CIO2_REG_FB_HPLL_FREQ);
	writel(CIO2_ISCLK_RATIO, base + CIO2_REG_ISCLK_RATIO);

	/* Configure MIPI backend */
	for (i = 0; i < NUM_VCS; i++)
		writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_SP_LUT_ENTRY(i));

	/* There are 16 short packet LUT entry */
	for (i = 0; i < 16; i++)
		writel(CIO2_MIPIBE_LP_LUT_ENTRY_DISREGARD,
		       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(i));
	writel(CIO2_MIPIBE_GLOBAL_LUT_DISREGARD,
	       q->csi_rx_base + CIO2_REG_MIPIBE_GLOBAL_LUT_DISREGARD);

	/* Interrupt enables: input errors, Rx IRQ control, output errors */
	writel(CIO2_INT_EN_EXT_IE_MASK, base + CIO2_REG_INT_EN_EXT_IE);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_EDGE);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_LEVEL_NOT_PULSE);
	writel(CIO2_INT_EN_EXT_OE_MASK, base + CIO2_REG_INT_EN_EXT_OE);

	writel(CIO2_REG_INT_EN_IRQ | CIO2_INT_IOC(CIO2_DMA_CHAN) |
	       CIO2_REG_INT_EN_IOS(CIO2_DMA_CHAN),
	       base + CIO2_REG_INT_EN);

	/* Route SID 0 to the configured CSI-2 bus, RAW10, 64B packing */
	writel((CIO2_PXM_PXF_FMT_CFG_BPP_10 | CIO2_PXM_PXF_FMT_CFG_PCK_64B)
	       << CIO2_PXM_PXF_FMT_CFG_SID0_SHIFT,
	       base + CIO2_REG_PXM_PXF_FMT_CFG0(csi2bus));
	writel(SID << CIO2_MIPIBE_LP_LUT_ENTRY_SID_SHIFT |
	       sensor_vc << CIO2_MIPIBE_LP_LUT_ENTRY_VC_SHIFT |
	       fmt->mipicode << CIO2_MIPIBE_LP_LUT_ENTRY_FORMAT_TYPE_SHIFT,
	       q->csi_rx_base + CIO2_REG_MIPIBE_LP_LUT_ENTRY(ENTRY));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_COMP_FORMAT(sensor_vc));
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_FORCE_RAW8);
	writel(0, base + CIO2_REG_PXM_SID2BID0(csi2bus));

	writel(lanes, q->csi_rx_base + CIO2_REG_CSIRX_NOF_ENABLED_LANES);

	/* Clock gating configuration */
	writel(CIO2_CGC_PRIM_TGE |
	       CIO2_CGC_SIDE_TGE |
	       CIO2_CGC_XOSC_TGE |
	       CIO2_CGC_D3I3_TGE |
	       CIO2_CGC_CSI2_INTERFRAME_TGE |
	       CIO2_CGC_CSI2_PORT_DCGE |
	       CIO2_CGC_SIDE_DCGE |
	       CIO2_CGC_PRIM_DCGE |
	       CIO2_CGC_ROSC_DCGE |
	       CIO2_CGC_XOSC_DCGE |
	       CIO2_CGC_CLKGATE_HOLDOFF << CIO2_CGC_CLKGATE_HOLDOFF_SHIFT |
	       CIO2_CGC_CSI_CLKGATE_HOLDOFF
	       << CIO2_CGC_CSI_CLKGATE_HOLDOFF_SHIFT, base + CIO2_REG_CGC);

	/* Latency tolerance reporting values */
	writel(CIO2_LTRCTRL_LTRDYNEN, base + CIO2_REG_LTRCTRL);

	writel(CIO2_LTRVAL0_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL0_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL1_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL1_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL01);

	writel(CIO2_LTRVAL2_VAL << CIO2_LTRVAL02_VAL_SHIFT |
	       CIO2_LTRVAL2_SCALE << CIO2_LTRVAL02_SCALE_SHIFT |
	       CIO2_LTRVAL3_VAL << CIO2_LTRVAL13_VAL_SHIFT |
	       CIO2_LTRVAL3_SCALE << CIO2_LTRVAL13_SCALE_SHIFT,
	       base + CIO2_REG_LTRVAL23);

	/* Reset all DMA channels before enabling ours */
	for (i = 0; i < CIO2_NUM_DMA_CHAN; i++) {
		writel(0, base + CIO2_REG_CDMABA(i));
		writel(0, base + CIO2_REG_CDMAC0(i));
		writel(0, base + CIO2_REG_CDMAC1(i));
	}

	/* Enable DMA */
	writel(q->fbpt_bus_addr >> PAGE_SHIFT,
	       base + CIO2_REG_CDMABA(CIO2_DMA_CHAN));

	writel(num_buffers1 << CIO2_CDMAC0_FBPT_LEN_SHIFT |
	       FBPT_WIDTH << CIO2_CDMAC0_FBPT_WIDTH_SHIFT |
	       CIO2_CDMAC0_DMA_INTR_ON_FE |
	       CIO2_CDMAC0_FBPT_UPDATE_FIFO_FULL |
	       CIO2_CDMAC0_DMA_EN |
	       CIO2_CDMAC0_DMA_INTR_ON_FS |
	       CIO2_CDMAC0_DMA_HALTED, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));

	writel(1 << CIO2_CDMAC1_LINENUMUPDATE_SHIFT,
	       base + CIO2_REG_CDMAC1(CIO2_DMA_CHAN));

	writel(0, base + CIO2_REG_PBM_FOPN_ABORT);

	writel(CIO2_PXM_FRF_CFG_CRC_TH << CIO2_PXM_FRF_CFG_CRC_TH_SHIFT |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NR |
	       CIO2_PXM_FRF_CFG_MSK_ECC_RE |
	       CIO2_PXM_FRF_CFG_MSK_ECC_DPHY_NE,
	       base + CIO2_REG_PXM_FRF_CFG(q->csi2.port));

	/* Clear interrupts */
	writel(CIO2_IRQCTRL_MASK, q->csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
	writel(~0, base + CIO2_REG_INT_STS_EXT_OE);
	writel(~0, base + CIO2_REG_INT_STS_EXT_IE);
	writel(~0, base + CIO2_REG_INT_STS);

	/* Enable devices, starting from the last device in the pipe */
	writel(1, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);
	writel(1, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);

	return 0;
}
/*
 * Stop capture on queue @q: disable the receiver and MIPI backend,
 * halt the DMA channel (polling up to ~1-2s for the HALTED flag),
 * then abort any in-flight frames on all ports.
 */
static void cio2_hw_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	void __iomem *base = cio2->base;
	unsigned int i, maxloops = 1000;

	/* Disable CSI receiver and MIPI backend devices */
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_MASK);
	writel(0, q->csi_rx_base + CIO2_REG_IRQCTRL_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_CSIRX_ENABLE);
	writel(0, q->csi_rx_base + CIO2_REG_MIPIBE_ENABLE);

	/* Halt DMA */
	writel(0, base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN));
	do {
		if (readl(base + CIO2_REG_CDMAC0(CIO2_DMA_CHAN)) &
		    CIO2_CDMAC0_DMA_HALTED)
			break;
		usleep_range(1000, 2000);
	} while (--maxloops);
	if (!maxloops)
		dev_err(&cio2->pci_dev->dev,
			"DMA %i can not be halted\n", CIO2_DMA_CHAN);

	/* Abort frame reception and buffer manager output on every port */
	for (i = 0; i < CIO2_NUM_PORTS; i++) {
		writel(readl(base + CIO2_REG_PXM_FRF_CFG(i)) |
		       CIO2_PXM_FRF_CFG_ABORT, base + CIO2_REG_PXM_FRF_CFG(i));
		writel(readl(base + CIO2_REG_PBM_FOPN_ABORT) |
		       CIO2_PBM_FOPN_ABORT(i), base + CIO2_REG_PBM_FOPN_ABORT);
	}
}
- static void cio2_buffer_done(struct cio2_device *cio2, unsigned int dma_chan)
- {
- struct device *dev = &cio2->pci_dev->dev;
- struct cio2_queue *q = cio2->cur_queue;
- int buffers_found = 0;
- u64 ns = ktime_get_ns();
- if (dma_chan >= CIO2_QUEUES) {
- dev_err(dev, "bad DMA channel %i\n", dma_chan);
- return;
- }
- /* Find out which buffer(s) are ready */
- do {
- struct cio2_fbpt_entry *const entry =
- &q->fbpt[q->bufs_first * CIO2_MAX_LOPS];
- struct cio2_buffer *b;
- if (entry->first_entry.ctrl & CIO2_FBPT_CTRL_VALID)
- break;
- b = q->bufs[q->bufs_first];
- if (b) {
- unsigned int bytes = entry[1].second_entry.num_of_bytes;
- q->bufs[q->bufs_first] = NULL;
- atomic_dec(&q->bufs_queued);
- dev_dbg(&cio2->pci_dev->dev,
- "buffer %i done\n", b->vbb.vb2_buf.index);
- b->vbb.vb2_buf.timestamp = ns;
- b->vbb.field = V4L2_FIELD_NONE;
- b->vbb.sequence = atomic_read(&q->frame_sequence);
- if (b->vbb.vb2_buf.planes[0].length != bytes)
- dev_warn(dev, "buffer length is %d received %d\n",
- b->vbb.vb2_buf.planes[0].length,
- bytes);
- vb2_buffer_done(&b->vbb.vb2_buf, VB2_BUF_STATE_DONE);
- }
- atomic_inc(&q->frame_sequence);
- cio2_fbpt_entry_init_dummy(cio2, entry);
- q->bufs_first = (q->bufs_first + 1) % CIO2_MAX_BUFFERS;
- buffers_found++;
- } while (1);
- if (buffers_found == 0)
- dev_warn(&cio2->pci_dev->dev,
- "no ready buffers found on DMA channel %u\n",
- dma_chan);
- }
/* Emit a V4L2 FRAME_SYNC event on the queue's subdev at start of frame */
static void cio2_queue_event_sof(struct cio2_device *cio2, struct cio2_queue *q)
{
	/*
	 * For the user space camera control algorithms it is essential
	 * to know when the reception of a frame has begun. That's often
	 * the best timing information to get from the hardware.
	 */
	struct v4l2_event event = {
		.type = V4L2_EVENT_FRAME_SYNC,
		.u.frame_sync.frame_sequence = atomic_read(&q->frame_sequence),
	};

	v4l2_event_queue(q->subdev.devnode, &event);
}
/*
 * CSI-2 receiver IRQ status descriptions; array index corresponds to the
 * bit position in CIO2_REG_IRQCTRL_STATUS (see cio2_irq_handle_once()).
 */
static const char *const cio2_irq_errs[] = {
	"single packet header error corrected",
	"multiple packet header errors detected",
	"payload checksum (CRC) error",
	"fifo overflow",
	"reserved short packet data type detected",
	"reserved long packet data type detected",
	"incomplete long packet detected",
	"frame sync error",
	"line sync error",
	"DPHY start of transmission error",
	"DPHY synchronization error",
	"escape mode error",
	"escape mode trigger event",
	"escape mode ultra-low power state for data lane(s)",
	"escape mode ultra-low power state exit for clock lane",
	"inter-frame short packet discarded",
	"inter-frame long packet discarded",
	"non-matching Long Packet stalled",
	};
/*
 * Per-port error descriptions; array index corresponds to the bit
 * position within the port's byte of CIO2_REG_INT_STS_EXT_IE.
 */
static const char *const cio2_port_errs[] = {
	"ECC recoverable",
	"DPHY not recoverable",
	"ECC not recoverable",
	"CRC error",
	"INTERFRAMEDATA",
	"PKT2SHORT",
	"PKT2LONG",
	};
/*
 * Handle one snapshot of the top-level interrupt status register:
 * demultiplex output errors, frame-done (IOC), start-of-frame (IOS/IOLN)
 * and CSI-2 receiver error interrupts.  Bits are cleared from the local
 * @int_status copy as they are serviced; anything left over is unknown
 * and only logged.
 */
static void cio2_irq_handle_once(struct cio2_device *cio2, u32 int_status)
{
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;

	if (int_status & CIO2_INT_IOOE) {
		/*
		 * Interrupt on Output Error:
		 * 1) SRAM is full and FS received, or
		 * 2) An invalid bit detected by DMA.
		 */
		u32 oe_status, oe_clear;

		oe_clear = readl(base + CIO2_REG_INT_STS_EXT_OE);
		oe_status = oe_clear;

		if (oe_status & CIO2_INT_EXT_OE_DMAOE_MASK) {
			dev_err(dev, "DMA output error: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_DMAOE_MASK)
				>> CIO2_INT_EXT_OE_DMAOE_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_DMAOE_MASK;
		}
		if (oe_status & CIO2_INT_EXT_OE_OES_MASK) {
			dev_err(dev, "DMA output error on CSI2 buses: 0x%x\n",
				(oe_status & CIO2_INT_EXT_OE_OES_MASK)
				>> CIO2_INT_EXT_OE_OES_SHIFT);
			oe_status &= ~CIO2_INT_EXT_OE_OES_MASK;
		}
		/* Write back the snapshot to ack exactly what was read */
		writel(oe_clear, base + CIO2_REG_INT_STS_EXT_OE);
		if (oe_status)
			dev_warn(dev, "unknown interrupt 0x%x on OE\n",
				 oe_status);
		int_status &= ~CIO2_INT_IOOE;
	}

	if (int_status & CIO2_INT_IOC_MASK) {
		/* DMA IO done -- frame ready */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOC(d)) {
				clr |= CIO2_INT_IOC(d);
				cio2_buffer_done(cio2, d);
			}
		int_status &= ~clr;
	}

	if (int_status & CIO2_INT_IOS_IOLN_MASK) {
		/* DMA IO starts or reached specified line */
		u32 clr = 0;
		unsigned int d;

		for (d = 0; d < CIO2_NUM_DMA_CHAN; d++)
			if (int_status & CIO2_INT_IOS_IOLN(d)) {
				clr |= CIO2_INT_IOS_IOLN(d);
				/* SOF events only for the active channel */
				if (d == CIO2_DMA_CHAN)
					cio2_queue_event_sof(cio2,
							     cio2->cur_queue);
			}
		int_status &= ~clr;
	}

	if (int_status & (CIO2_INT_IOIE | CIO2_INT_IOIRQ)) {
		/* CSI2 receiver (error) interrupt */
		u32 ie_status, ie_clear;
		unsigned int port;

		ie_clear = readl(base + CIO2_REG_INT_STS_EXT_IE);
		ie_status = ie_clear;

		for (port = 0; port < CIO2_NUM_PORTS; port++) {
			/* Each port owns one byte of the IE status word */
			u32 port_status = (ie_status >> (port * 8)) & 0xff;
			u32 err_mask = BIT_MASK(ARRAY_SIZE(cio2_port_errs)) - 1;
			void __iomem *const csi_rx_base =
						base + CIO2_REG_PIPE_BASE(port);
			unsigned int i;

			while (port_status & err_mask) {
				i = ffs(port_status) - 1;
				dev_err(dev, "port %i error %s\n",
					port, cio2_port_errs[i]);
				ie_status &= ~BIT(port * 8 + i);
				port_status &= ~BIT(i);
			}

			if (ie_status & CIO2_INT_EXT_IE_IRQ(port)) {
				u32 csi2_status, csi2_clear;

				csi2_status = readl(csi_rx_base +
						CIO2_REG_IRQCTRL_STATUS);
				csi2_clear = csi2_status;
				err_mask =
					BIT_MASK(ARRAY_SIZE(cio2_irq_errs)) - 1;

				while (csi2_status & err_mask) {
					i = ffs(csi2_status) - 1;
					dev_err(dev,
						"CSI-2 receiver port %i: %s\n",
						port, cio2_irq_errs[i]);
					csi2_status &= ~BIT(i);
				}

				writel(csi2_clear,
				       csi_rx_base + CIO2_REG_IRQCTRL_CLEAR);
				if (csi2_status)
					dev_warn(dev,
						 "unknown CSI2 error 0x%x on port %i\n",
						 csi2_status, port);

				ie_status &= ~CIO2_INT_EXT_IE_IRQ(port);
			}
		}

		writel(ie_clear, base + CIO2_REG_INT_STS_EXT_IE);
		if (ie_status)
			dev_warn(dev, "unknown interrupt 0x%x on IE\n",
				 ie_status);

		int_status &= ~(CIO2_INT_IOIE | CIO2_INT_IOIRQ);
	}

	if (int_status)
		dev_warn(dev, "unknown interrupt 0x%x on INT\n", int_status);
}
/*
 * Top-level interrupt handler.  Acks the status register *before*
 * handling each snapshot, then re-reads it and loops so that events
 * raised while handling are not lost.
 */
static irqreturn_t cio2_irq(int irq, void *cio2_ptr)
{
	struct cio2_device *cio2 = cio2_ptr;
	void __iomem *const base = cio2->base;
	struct device *dev = &cio2->pci_dev->dev;
	u32 int_status;

	int_status = readl(base + CIO2_REG_INT_STS);
	dev_dbg(dev, "isr enter - interrupt status 0x%x\n", int_status);
	if (!int_status)
		return IRQ_NONE;

	do {
		writel(int_status, base + CIO2_REG_INT_STS);
		cio2_irq_handle_once(cio2, int_status);
		int_status = readl(base + CIO2_REG_INT_STS);
		if (int_status)
			dev_dbg(dev, "pending status 0x%x\n", int_status);
	} while (int_status);

	return IRQ_HANDLED;
}
- /**************** Videobuf2 interface ****************/
- static void cio2_vb2_return_all_buffers(struct cio2_queue *q,
- enum vb2_buffer_state state)
- {
- unsigned int i;
- for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
- if (q->bufs[i]) {
- atomic_dec(&q->bufs_queued);
- vb2_buffer_done(&q->bufs[i]->vbb.vb2_buf,
- state);
- }
- }
- }
/*
 * vb2 .queue_setup: report plane count/sizes from the current queue
 * format, clamp the buffer count to CIO2_MAX_BUFFERS and reset the
 * driver-side buffer ring (all FBPT entries back to the dummy frame).
 */
static int cio2_vb2_queue_setup(struct vb2_queue *vq,
				unsigned int *num_buffers,
				unsigned int *num_planes,
				unsigned int sizes[],
				struct device *alloc_devs[])
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	unsigned int i;

	*num_planes = q->format.num_planes;

	for (i = 0; i < *num_planes; ++i) {
		sizes[i] = q->format.plane_fmt[i].sizeimage;
		/* Allocate from the PCI device so buffers are DMA-able */
		alloc_devs[i] = &cio2->pci_dev->dev;
	}

	*num_buffers = clamp_val(*num_buffers, 1, CIO2_MAX_BUFFERS);

	/* Initialize buffer queue */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		q->bufs[i] = NULL;
		cio2_fbpt_entry_init_dummy(cio2, &q->fbpt[i * CIO2_MAX_LOPS]);
	}
	atomic_set(&q->bufs_queued, 0);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}
- /* Called after each buffer is allocated */
- static int cio2_vb2_buf_init(struct vb2_buffer *vb)
- {
- struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
- struct device *dev = &cio2->pci_dev->dev;
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
- static const unsigned int entries_per_page =
- CIO2_PAGE_SIZE / sizeof(u32);
- unsigned int pages = DIV_ROUND_UP(vb->planes[0].length, CIO2_PAGE_SIZE);
- unsigned int lops = DIV_ROUND_UP(pages + 1, entries_per_page);
- struct sg_table *sg;
- struct sg_page_iter sg_iter;
- int i, j;
- if (lops <= 0 || lops > CIO2_MAX_LOPS) {
- dev_err(dev, "%s: bad buffer size (%i)\n", __func__,
- vb->planes[0].length);
- return -ENOSPC; /* Should never happen */
- }
- memset(b->lop, 0, sizeof(b->lop));
- /* Allocate LOP table */
- for (i = 0; i < lops; i++) {
- b->lop[i] = dma_alloc_coherent(dev, CIO2_PAGE_SIZE,
- &b->lop_bus_addr[i], GFP_KERNEL);
- if (!b->lop[i])
- goto fail;
- }
- /* Fill LOP */
- sg = vb2_dma_sg_plane_desc(vb, 0);
- if (!sg)
- return -ENOMEM;
- if (sg->nents && sg->sgl)
- b->offset = sg->sgl->offset;
- i = j = 0;
- for_each_sg_page(sg->sgl, &sg_iter, sg->nents, 0) {
- if (!pages--)
- break;
- b->lop[i][j] = sg_page_iter_dma_address(&sg_iter) >> PAGE_SHIFT;
- j++;
- if (j == entries_per_page) {
- i++;
- j = 0;
- }
- }
- b->lop[i][j] = cio2->dummy_page_bus_addr >> PAGE_SHIFT;
- return 0;
- fail:
- for (i--; i >= 0; i--)
- dma_free_coherent(dev, CIO2_PAGE_SIZE,
- b->lop[i], b->lop_bus_addr[i]);
- return -ENOMEM;
- }
/* Transfer buffer ownership to cio2 */
/*
 * vb2 .buf_queue: install the buffer into the first free FBPT slot at or
 * after the DMA engine's read pointer, with interrupts disabled.
 */
static void cio2_vb2_buf_queue(struct vb2_buffer *vb)
{
	struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
	struct cio2_queue *q =
		container_of(vb->vb2_queue, struct cio2_queue, vbq);
	struct cio2_buffer *b =
		container_of(vb, struct cio2_buffer, vbb.vb2_buf);
	struct cio2_fbpt_entry *entry;
	unsigned long flags;
	unsigned int i, j, next = q->bufs_next;
	int bufs_queued = atomic_inc_return(&q->bufs_queued);
	u32 fbpt_rp;

	dev_dbg(&cio2->pci_dev->dev, "queue buffer %d\n", vb->index);

	/*
	 * This code queues the buffer to the CIO2 DMA engine, which starts
	 * running once streaming has started. It is possible that this code
	 * gets pre-empted due to increased CPU load. Upon this, the driver
	 * does not get an opportunity to queue new buffers to the CIO2 DMA
	 * engine. When the DMA engine encounters an FBPT entry without the
	 * VALID bit set, the DMA engine halts, which requires a restart of
	 * the DMA engine and sensor, to continue streaming.
	 * This is not desired and is highly unlikely given that there are
	 * 32 FBPT entries that the DMA engine needs to process, to run into
	 * an FBPT entry, without the VALID bit set. We try to mitigate this
	 * by disabling interrupts for the duration of this queueing.
	 */
	local_irq_save(flags);

	/* Read the DMA engine's current FBPT read pointer. */
	fbpt_rp = (readl(cio2->base + CIO2_REG_CDMARI(CIO2_DMA_CHAN))
		   >> CIO2_CDMARI_FBPT_RP_SHIFT)
		   & CIO2_CDMARI_FBPT_RP_MASK;

	/*
	 * fbpt_rp is the fbpt entry that the dma is currently working
	 * on, but since it could jump to next entry at any time,
	 * assume that we might already be there.
	 */
	fbpt_rp = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	if (bufs_queued <= 1 || fbpt_rp == next)
		/* Buffers were drained */
		next = (fbpt_rp + 1) % CIO2_MAX_BUFFERS;

	for (i = 0; i < CIO2_MAX_BUFFERS; i++) {
		/*
		 * We have allocated CIO2_MAX_BUFFERS circularly for the
		 * hw, the user has requested N buffer queue. The driver
		 * ensures N <= CIO2_MAX_BUFFERS and guarantees that whenever
		 * user queues a buffer, there necessarily is a free buffer.
		 */
		if (!q->bufs[next]) {
			q->bufs[next] = b;
			entry = &q->fbpt[next * CIO2_MAX_LOPS];
			cio2_fbpt_entry_init_buf(cio2, b, entry);
			/* Slot claimed; safe to take interrupts again. */
			local_irq_restore(flags);
			q->bufs_next = (next + 1) % CIO2_MAX_BUFFERS;
			for (j = 0; j < vb->num_planes; j++)
				vb2_set_plane_payload(vb, j,
					q->format.plane_fmt[j].sizeimage);
			return;
		}

		dev_dbg(&cio2->pci_dev->dev, "entry %i was full!\n", next);
		next = (next + 1) % CIO2_MAX_BUFFERS;
	}

	/* No free slot: undo the count and fail the buffer back to vb2. */
	local_irq_restore(flags);
	dev_err(&cio2->pci_dev->dev, "error: all cio2 entries were full!\n");
	atomic_dec(&q->bufs_queued);
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
- /* Called when each buffer is freed */
- static void cio2_vb2_buf_cleanup(struct vb2_buffer *vb)
- {
- struct cio2_device *cio2 = vb2_get_drv_priv(vb->vb2_queue);
- struct cio2_buffer *b =
- container_of(vb, struct cio2_buffer, vbb.vb2_buf);
- unsigned int i;
- /* Free LOP table */
- for (i = 0; i < CIO2_MAX_LOPS; i++) {
- if (b->lop[i])
- dma_free_coherent(&cio2->pci_dev->dev, CIO2_PAGE_SIZE,
- b->lop[i], b->lop_bus_addr[i]);
- }
- }
/*
 * vb2 .start_streaming: power up, start the media pipeline, the CIO2
 * hardware and finally the sensor. On failure, queued buffers are given
 * back to vb2 in QUEUED state so userspace can retry.
 */
static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct cio2_queue *q = vb2q_to_cio2_queue(vq);
	struct cio2_device *cio2 = vb2_get_drv_priv(vq);
	int r;

	cio2->cur_queue = q;
	atomic_set(&q->frame_sequence, 0);

	r = pm_runtime_get_sync(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_info(&cio2->pci_dev->dev, "failed to set power %d\n", r);
		/* get_sync raises the usage count even on failure */
		pm_runtime_put_noidle(&cio2->pci_dev->dev);
		return r;
	}

	r = media_pipeline_start(&q->vdev.entity, &q->pipe);
	if (r)
		goto fail_pipeline;

	r = cio2_hw_init(cio2, q);
	if (r)
		goto fail_hw;

	/* Start streaming on sensor */
	r = v4l2_subdev_call(q->sensor, video, s_stream, 1);
	if (r)
		goto fail_csi2_subdev;

	cio2->streaming = true;

	return 0;

	/* Unwind in reverse order of the setup steps above. */
fail_csi2_subdev:
	cio2_hw_exit(cio2, q);
fail_hw:
	media_pipeline_stop(&q->vdev.entity);
fail_pipeline:
	dev_dbg(&cio2->pci_dev->dev, "failed to start streaming (%d)\n", r);
	cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED);
	pm_runtime_put(&cio2->pci_dev->dev);

	return r;
}
- static void cio2_vb2_stop_streaming(struct vb2_queue *vq)
- {
- struct cio2_queue *q = vb2q_to_cio2_queue(vq);
- struct cio2_device *cio2 = vb2_get_drv_priv(vq);
- if (v4l2_subdev_call(q->sensor, video, s_stream, 0))
- dev_err(&cio2->pci_dev->dev,
- "failed to stop sensor streaming\n");
- cio2_hw_exit(cio2, q);
- synchronize_irq(cio2->pci_dev->irq);
- cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR);
- media_pipeline_stop(&q->vdev.entity);
- pm_runtime_put(&cio2->pci_dev->dev);
- cio2->streaming = false;
- }
/* Driver callbacks for the videobuf2 queue. */
static const struct vb2_ops cio2_vb2_ops = {
	.buf_init = cio2_vb2_buf_init,
	.buf_queue = cio2_vb2_buf_queue,
	.buf_cleanup = cio2_vb2_buf_cleanup,
	.queue_setup = cio2_vb2_queue_setup,
	.start_streaming = cio2_vb2_start_streaming,
	.stop_streaming = cio2_vb2_stop_streaming,
	.wait_prepare = vb2_ops_wait_prepare,
	.wait_finish = vb2_ops_wait_finish,
};
/**************** V4L2 interface ****************/

/* VIDIOC_QUERYCAP: report driver name, card name and PCI bus location. */
static int cio2_v4l2_querycap(struct file *file, void *fh,
			      struct v4l2_capability *cap)
{
	struct cio2_device *cio2 = video_drvdata(file);

	strlcpy(cap->driver, CIO2_NAME, sizeof(cap->driver));
	strlcpy(cap->card, CIO2_DEVICE_NAME, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));

	return 0;
}
/* VIDIOC_ENUM_FMT: enumerate straight out of the formats[] table. */
static int cio2_v4l2_enum_fmt(struct file *file, void *fh,
			      struct v4l2_fmtdesc *f)
{
	if (f->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	f->pixelformat = formats[f->index].fourcc;

	return 0;
}
/* The format is validated in cio2_video_link_validate() */
/* VIDIOC_G_FMT: return the current queue format unchanged. */
static int cio2_v4l2_g_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	f->fmt.pix_mp = q->format;

	return 0;
}
- static int cio2_v4l2_try_fmt(struct file *file, void *fh, struct v4l2_format *f)
- {
- const struct ipu3_cio2_fmt *fmt;
- struct v4l2_pix_format_mplane *mpix = &f->fmt.pix_mp;
- fmt = cio2_find_format(&mpix->pixelformat, NULL);
- if (!fmt)
- fmt = &formats[0];
- /* Only supports up to 4224x3136 */
- if (mpix->width > CIO2_IMAGE_MAX_WIDTH)
- mpix->width = CIO2_IMAGE_MAX_WIDTH;
- if (mpix->height > CIO2_IMAGE_MAX_LENGTH)
- mpix->height = CIO2_IMAGE_MAX_LENGTH;
- mpix->num_planes = 1;
- mpix->pixelformat = fmt->fourcc;
- mpix->colorspace = V4L2_COLORSPACE_RAW;
- mpix->field = V4L2_FIELD_NONE;
- memset(mpix->reserved, 0, sizeof(mpix->reserved));
- mpix->plane_fmt[0].bytesperline = cio2_bytesperline(mpix->width);
- mpix->plane_fmt[0].sizeimage = mpix->plane_fmt[0].bytesperline *
- mpix->height;
- memset(mpix->plane_fmt[0].reserved, 0,
- sizeof(mpix->plane_fmt[0].reserved));
- /* use default */
- mpix->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT;
- mpix->quantization = V4L2_QUANTIZATION_DEFAULT;
- mpix->xfer_func = V4L2_XFER_FUNC_DEFAULT;
- return 0;
- }
/* VIDIOC_S_FMT: normalize via try_fmt, then latch as the queue format. */
static int cio2_v4l2_s_fmt(struct file *file, void *fh, struct v4l2_format *f)
{
	struct cio2_queue *q = file_to_cio2_queue(file);

	cio2_v4l2_try_fmt(file, fh, f);
	q->format = f->fmt.pix_mp;

	return 0;
}
/* VIDIOC_ENUMINPUT: a single input, "camera", is exposed. */
static int
cio2_video_enum_input(struct file *file, void *fh, struct v4l2_input *input)
{
	if (input->index > 0)
		return -EINVAL;

	strlcpy(input->name, "camera", sizeof(input->name));
	input->type = V4L2_INPUT_TYPE_CAMERA;

	return 0;
}
/* VIDIOC_G_INPUT: always input 0 (the only one). */
static int
cio2_video_g_input(struct file *file, void *fh, unsigned int *input)
{
	*input = 0;

	return 0;
}
/* VIDIOC_S_INPUT: only input 0 may be selected. */
static int
cio2_video_s_input(struct file *file, void *fh, unsigned int input)
{
	return input == 0 ? 0 : -EINVAL;
}
/* Video-node file operations; vb2 helpers do the heavy lifting. */
static const struct v4l2_file_operations cio2_v4l2_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = video_ioctl2,
	.open = v4l2_fh_open,
	.release = vb2_fop_release,
	.poll = vb2_fop_poll,
	.mmap = vb2_fop_mmap,
};

/* ioctl table: formats handled locally, buffer ioctls delegated to vb2. */
static const struct v4l2_ioctl_ops cio2_v4l2_ioctl_ops = {
	.vidioc_querycap = cio2_v4l2_querycap,
	.vidioc_enum_fmt_vid_cap_mplane = cio2_v4l2_enum_fmt,
	.vidioc_g_fmt_vid_cap_mplane = cio2_v4l2_g_fmt,
	.vidioc_s_fmt_vid_cap_mplane = cio2_v4l2_s_fmt,
	.vidioc_try_fmt_vid_cap_mplane = cio2_v4l2_try_fmt,
	.vidioc_reqbufs = vb2_ioctl_reqbufs,
	.vidioc_create_bufs = vb2_ioctl_create_bufs,
	.vidioc_prepare_buf = vb2_ioctl_prepare_buf,
	.vidioc_querybuf = vb2_ioctl_querybuf,
	.vidioc_qbuf = vb2_ioctl_qbuf,
	.vidioc_dqbuf = vb2_ioctl_dqbuf,
	.vidioc_streamon = vb2_ioctl_streamon,
	.vidioc_streamoff = vb2_ioctl_streamoff,
	.vidioc_expbuf = vb2_ioctl_expbuf,
	.vidioc_enum_input = cio2_video_enum_input,
	.vidioc_g_input	= cio2_video_g_input,
	.vidioc_s_input	= cio2_video_s_input,
};
- static int cio2_subdev_subscribe_event(struct v4l2_subdev *sd,
- struct v4l2_fh *fh,
- struct v4l2_event_subscription *sub)
- {
- if (sub->type != V4L2_EVENT_FRAME_SYNC)
- return -EINVAL;
- /* Line number. For now only zero accepted. */
- if (sub->id != 0)
- return -EINVAL;
- return v4l2_event_subscribe(fh, sub, 0, NULL);
- }
/* Subdev .open: seed the per-fh TRY formats of both pads with defaults. */
static int cio2_subdev_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
	struct v4l2_mbus_framefmt *format;
	const struct v4l2_mbus_framefmt fmt_default = {
		.width = 1936,
		.height = 1096,
		.code = formats[0].mbus_code,
		.field = V4L2_FIELD_NONE,
		.colorspace = V4L2_COLORSPACE_RAW,
		.ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT,
		.quantization = V4L2_QUANTIZATION_DEFAULT,
		.xfer_func = V4L2_XFER_FUNC_DEFAULT,
	};

	/* Initialize try_fmt */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SINK);
	*format = fmt_default;

	/* same as sink */
	format = v4l2_subdev_get_try_format(sd, fh->pad, CIO2_PAD_SOURCE);
	*format = fmt_default;

	return 0;
}
- /*
- * cio2_subdev_get_fmt - Handle get format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
- static int cio2_subdev_get_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
- {
- struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
- struct v4l2_subdev_format format;
- int ret;
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- fmt->format = *v4l2_subdev_get_try_format(sd, cfg, fmt->pad);
- return 0;
- }
- if (fmt->pad == CIO2_PAD_SINK) {
- format.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(sd, pad, get_fmt, NULL,
- &format);
- if (ret)
- return ret;
- /* update colorspace etc */
- q->subdev_fmt.colorspace = format.format.colorspace;
- q->subdev_fmt.ycbcr_enc = format.format.ycbcr_enc;
- q->subdev_fmt.quantization = format.format.quantization;
- q->subdev_fmt.xfer_func = format.format.xfer_func;
- }
- fmt->format = q->subdev_fmt;
- return 0;
- }
- /*
- * cio2_subdev_set_fmt - Handle set format by pads subdev method
- * @sd : pointer to v4l2 subdev structure
- * @cfg: V4L2 subdev pad config
- * @fmt: pointer to v4l2 subdev format structure
- * return -EINVAL or zero on success
- */
- static int cio2_subdev_set_fmt(struct v4l2_subdev *sd,
- struct v4l2_subdev_pad_config *cfg,
- struct v4l2_subdev_format *fmt)
- {
- struct cio2_queue *q = container_of(sd, struct cio2_queue, subdev);
- /*
- * Only allow setting sink pad format;
- * source always propagates from sink
- */
- if (fmt->pad == CIO2_PAD_SOURCE)
- return cio2_subdev_get_fmt(sd, cfg, fmt);
- if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) {
- *v4l2_subdev_get_try_format(sd, cfg, fmt->pad) = fmt->format;
- } else {
- /* It's the sink, allow changing frame size */
- q->subdev_fmt.width = fmt->format.width;
- q->subdev_fmt.height = fmt->format.height;
- q->subdev_fmt.code = fmt->format.code;
- fmt->format = q->subdev_fmt;
- }
- return 0;
- }
/* Enumerate supported media bus codes from the formats[] table. */
static int cio2_subdev_enum_mbus_code(struct v4l2_subdev *sd,
				      struct v4l2_subdev_pad_config *cfg,
				      struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index >= ARRAY_SIZE(formats))
		return -EINVAL;

	code->code = formats[code->index].mbus_code;

	return 0;
}
- static int cio2_subdev_link_validate_get_format(struct media_pad *pad,
- struct v4l2_subdev_format *fmt)
- {
- if (is_media_entity_v4l2_subdev(pad->entity)) {
- struct v4l2_subdev *sd =
- media_entity_to_v4l2_subdev(pad->entity);
- fmt->which = V4L2_SUBDEV_FORMAT_ACTIVE;
- fmt->pad = pad->index;
- return v4l2_subdev_call(sd, pad, get_fmt, NULL, fmt);
- }
- return -EINVAL;
- }
/* Validate that the connected source matches the video node's format. */
static int cio2_video_link_validate(struct media_link *link)
{
	struct video_device *vd = container_of(link->sink->entity,
					       struct video_device, entity);
	struct cio2_queue *q = container_of(vd, struct cio2_queue, vdev);
	struct cio2_device *cio2 = video_get_drvdata(vd);
	struct v4l2_subdev_format source_fmt;
	int ret;

	if (!media_entity_remote_pad(link->sink->entity->pads)) {
		dev_info(&cio2->pci_dev->dev,
			 "video node %s pad not connected\n", vd->name);
		return -ENOTCONN;
	}

	ret = cio2_subdev_link_validate_get_format(link->source, &source_fmt);
	if (ret < 0)
		/* Source cannot report a format; treat the link as valid. */
		return 0;

	if (source_fmt.format.width != q->format.width ||
	    source_fmt.format.height != q->format.height) {
		dev_err(&cio2->pci_dev->dev,
			"Wrong width or height %ux%u (%ux%u expected)\n",
			q->format.width, q->format.height,
			source_fmt.format.width, source_fmt.format.height);
		return -EINVAL;
	}

	/* The pixel format must also map to the source's media bus code. */
	if (!cio2_find_format(&q->format.pixelformat, &source_fmt.format.code))
		return -EINVAL;

	return 0;
}
/* Core ops: frame-sync event subscription only. */
static const struct v4l2_subdev_core_ops cio2_subdev_core_ops = {
	.subscribe_event = cio2_subdev_subscribe_event,
	.unsubscribe_event = v4l2_event_subdev_unsubscribe,
};

static const struct v4l2_subdev_internal_ops cio2_subdev_internal_ops = {
	.open = cio2_subdev_open,
};

static const struct v4l2_subdev_pad_ops cio2_subdev_pad_ops = {
	.link_validate = v4l2_subdev_link_validate_default,
	.get_fmt = cio2_subdev_get_fmt,
	.set_fmt = cio2_subdev_set_fmt,
	.enum_mbus_code = cio2_subdev_enum_mbus_code,
};

static const struct v4l2_subdev_ops cio2_subdev_ops = {
	.core = &cio2_subdev_core_ops,
	.pad = &cio2_subdev_pad_ops,
};

/******* V4L2 sub-device asynchronous registration callbacks***********/

/* Async match data for one sensor: the fwnode match plus its CSI-2 bus. */
struct sensor_async_subdev {
	struct v4l2_async_subdev asd;
	struct csi2_bus_info csi2;
};
- /* The .bound() notifier callback when a match is found */
- static int cio2_notifier_bound(struct v4l2_async_notifier *notifier,
- struct v4l2_subdev *sd,
- struct v4l2_async_subdev *asd)
- {
- struct cio2_device *cio2 = container_of(notifier,
- struct cio2_device, notifier);
- struct sensor_async_subdev *s_asd = container_of(asd,
- struct sensor_async_subdev, asd);
- struct cio2_queue *q;
- if (cio2->queue[s_asd->csi2.port].sensor)
- return -EBUSY;
- q = &cio2->queue[s_asd->csi2.port];
- q->csi2 = s_asd->csi2;
- q->sensor = sd;
- q->csi_rx_base = cio2->base + CIO2_REG_PIPE_BASE(q->csi2.port);
- return 0;
- }
/* The .unbind callback */
static void cio2_notifier_unbind(struct v4l2_async_notifier *notifier,
				 struct v4l2_subdev *sd,
				 struct v4l2_async_subdev *asd)
{
	struct cio2_device *cio2 = container_of(notifier,
						struct cio2_device, notifier);
	struct sensor_async_subdev *s_asd = container_of(asd,
					struct sensor_async_subdev, asd);

	/* Release the port so a future .bound() can claim it again. */
	cio2->queue[s_asd->csi2.port].sensor = NULL;
}
- /* .complete() is called after all subdevices have been located */
- static int cio2_notifier_complete(struct v4l2_async_notifier *notifier)
- {
- struct cio2_device *cio2 = container_of(notifier, struct cio2_device,
- notifier);
- struct sensor_async_subdev *s_asd;
- struct cio2_queue *q;
- unsigned int i, pad;
- int ret;
- for (i = 0; i < notifier->num_subdevs; i++) {
- s_asd = container_of(cio2->notifier.subdevs[i],
- struct sensor_async_subdev, asd);
- q = &cio2->queue[s_asd->csi2.port];
- for (pad = 0; pad < q->sensor->entity.num_pads; pad++)
- if (q->sensor->entity.pads[pad].flags &
- MEDIA_PAD_FL_SOURCE)
- break;
- if (pad == q->sensor->entity.num_pads) {
- dev_err(&cio2->pci_dev->dev,
- "failed to find src pad for %s\n",
- q->sensor->name);
- return -ENXIO;
- }
- ret = media_create_pad_link(
- &q->sensor->entity, pad,
- &q->subdev.entity, CIO2_PAD_SINK,
- 0);
- if (ret) {
- dev_err(&cio2->pci_dev->dev,
- "failed to create link for %s\n",
- cio2->queue[i].sensor->name);
- return ret;
- }
- }
- return v4l2_device_register_subdev_nodes(&cio2->v4l2_dev);
- }
/* Async notifier callbacks for sensor discovery. */
static const struct v4l2_async_notifier_operations cio2_async_ops = {
	.bound = cio2_notifier_bound,
	.unbind = cio2_notifier_unbind,
	.complete = cio2_notifier_complete,
};
/* Per-endpoint fwnode parser: record the CSI-2 port and lane count. */
static int cio2_fwnode_parse(struct device *dev,
			     struct v4l2_fwnode_endpoint *vep,
			     struct v4l2_async_subdev *asd)
{
	struct sensor_async_subdev *s_asd =
			container_of(asd, struct sensor_async_subdev, asd);

	if (vep->bus_type != V4L2_MBUS_CSI2) {
		dev_err(dev, "Only CSI2 bus type is currently supported\n");
		return -EINVAL;
	}

	s_asd->csi2.port = vep->base.port;
	s_asd->csi2.lanes = vep->bus.mipi_csi2.num_data_lanes;

	return 0;
}
/* Parse firmware endpoints and register the async sensor notifier. */
static int cio2_notifier_init(struct cio2_device *cio2)
{
	int ret;

	/* One sensor_async_subdev is allocated per fwnode endpoint. */
	ret = v4l2_async_notifier_parse_fwnode_endpoints(
		&cio2->pci_dev->dev, &cio2->notifier,
		sizeof(struct sensor_async_subdev),
		cio2_fwnode_parse);
	if (ret < 0)
		return ret;

	if (!cio2->notifier.num_subdevs)
		return -ENODEV;	/* no endpoint */

	cio2->notifier.ops = &cio2_async_ops;
	ret = v4l2_async_notifier_register(&cio2->v4l2_dev, &cio2->notifier);
	if (ret) {
		dev_err(&cio2->pci_dev->dev,
			"failed to register async notifier : %d\n", ret);
		/* Registration failed: drop the parsed endpoint data. */
		v4l2_async_notifier_cleanup(&cio2->notifier);
	}

	return ret;
}
/* Unregister and free the async sensor notifier. */
static void cio2_notifier_exit(struct cio2_device *cio2)
{
	v4l2_async_notifier_unregister(&cio2->notifier);
	v4l2_async_notifier_cleanup(&cio2->notifier);
}
/**************** Queue initialization ****************/

/* Entity ops for the CIO2 subdev: standard subdev link validation. */
static const struct media_entity_operations cio2_media_ops = {
	.link_validate = v4l2_subdev_link_validate,
};

/* Entity ops for the video node: custom format/size validation. */
static const struct media_entity_operations cio2_video_entity_ops = {
	.link_validate = cio2_video_link_validate,
};
/*
 * Create one capture queue: default formats, FBPT, media entities, the
 * CIO2 subdev, the vb2 queue and the video node, plus the subdev->video
 * link. Unwinds fully on error via the label chain at the bottom.
 */
static int cio2_queue_init(struct cio2_device *cio2, struct cio2_queue *q)
{
	static const u32 default_width = 1936;
	static const u32 default_height = 1096;
	const struct ipu3_cio2_fmt dflt_fmt = formats[0];
	struct video_device *vdev = &q->vdev;
	struct vb2_queue *vbq = &q->vbq;
	struct v4l2_subdev *subdev = &q->subdev;
	struct v4l2_mbus_framefmt *fmt;
	int r;

	/* Initialize miscellaneous variables */
	mutex_init(&q->lock);

	/* Initialize formats to default values */
	fmt = &q->subdev_fmt;
	fmt->width = default_width;
	fmt->height = default_height;
	fmt->code = dflt_fmt.mbus_code;
	fmt->field = V4L2_FIELD_NONE;

	q->format.width = default_width;
	q->format.height = default_height;
	q->format.pixelformat = dflt_fmt.fourcc;
	q->format.colorspace = V4L2_COLORSPACE_RAW;
	q->format.field = V4L2_FIELD_NONE;
	q->format.num_planes = 1;
	q->format.plane_fmt[0].bytesperline =
				cio2_bytesperline(q->format.width);
	q->format.plane_fmt[0].sizeimage = q->format.plane_fmt[0].bytesperline *
						q->format.height;

	/* Initialize fbpt */
	r = cio2_fbpt_init(cio2, q);
	if (r)
		goto fail_fbpt;

	/* Initialize media entities */
	q->subdev_pads[CIO2_PAD_SINK].flags = MEDIA_PAD_FL_SINK |
		MEDIA_PAD_FL_MUST_CONNECT;
	q->subdev_pads[CIO2_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
	subdev->entity.ops = &cio2_media_ops;
	subdev->internal_ops = &cio2_subdev_internal_ops;
	r = media_entity_pads_init(&subdev->entity, CIO2_PADS, q->subdev_pads);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize subdev media entity (%d)\n", r);
		goto fail_subdev_media_entity;
	}

	q->vdev_pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT;
	vdev->entity.ops = &cio2_video_entity_ops;
	r = media_entity_pads_init(&vdev->entity, 1, &q->vdev_pad);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize videodev media entity (%d)\n", r);
		goto fail_vdev_media_entity;
	}

	/* Initialize subdev */
	v4l2_subdev_init(subdev, &cio2_subdev_ops);
	subdev->flags = V4L2_SUBDEV_FL_HAS_DEVNODE | V4L2_SUBDEV_FL_HAS_EVENTS;
	subdev->owner = THIS_MODULE;
	/* Entity name carries the queue index (pointer arithmetic). */
	snprintf(subdev->name, sizeof(subdev->name),
		 CIO2_ENTITY_NAME " %td", q - cio2->queue);
	v4l2_set_subdevdata(subdev, cio2);
	r = v4l2_device_register_subdev(&cio2->v4l2_dev, subdev);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed initialize subdev (%d)\n", r);
		goto fail_subdev;
	}

	/* Initialize vbq */
	vbq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
	vbq->io_modes = VB2_USERPTR | VB2_MMAP | VB2_DMABUF;
	vbq->ops = &cio2_vb2_ops;
	vbq->mem_ops = &vb2_dma_sg_memops;
	vbq->buf_struct_size = sizeof(struct cio2_buffer);
	vbq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	vbq->min_buffers_needed = 1;
	vbq->drv_priv = cio2;
	vbq->lock = &q->lock;
	r = vb2_queue_init(vbq);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed to initialize videobuf2 queue (%d)\n", r);
		goto fail_vbq;
	}

	/* Initialize vdev */
	snprintf(vdev->name, sizeof(vdev->name),
		 "%s %td", CIO2_NAME, q - cio2->queue);
	vdev->release = video_device_release_empty;
	vdev->fops = &cio2_v4l2_fops;
	vdev->ioctl_ops = &cio2_v4l2_ioctl_ops;
	vdev->lock = &cio2->lock;
	vdev->v4l2_dev = &cio2->v4l2_dev;
	vdev->queue = &q->vbq;
	vdev->device_caps = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_STREAMING;
	video_set_drvdata(vdev, cio2);
	r = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	if (r) {
		dev_err(&cio2->pci_dev->dev,
			"failed to register video device (%d)\n", r);
		goto fail_vdev;
	}

	/* Create link from CIO2 subdev to output node */
	r = media_create_pad_link(
		&subdev->entity, CIO2_PAD_SOURCE, &vdev->entity, 0,
		MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
	if (r)
		goto fail_link;

	return 0;

	/* Error unwinding, in reverse order of the steps above. */
fail_link:
	video_unregister_device(&q->vdev);
fail_vdev:
	vb2_queue_release(vbq);
fail_vbq:
	v4l2_device_unregister_subdev(subdev);
fail_subdev:
	media_entity_cleanup(&vdev->entity);
fail_vdev_media_entity:
	media_entity_cleanup(&subdev->entity);
fail_subdev_media_entity:
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
fail_fbpt:
	mutex_destroy(&q->lock);

	return r;
}
/* Tear down one queue, in reverse order of cio2_queue_init(). */
static void cio2_queue_exit(struct cio2_device *cio2, struct cio2_queue *q)
{
	video_unregister_device(&q->vdev);
	media_entity_cleanup(&q->vdev.entity);
	vb2_queue_release(&q->vbq);
	v4l2_device_unregister_subdev(&q->subdev);
	media_entity_cleanup(&q->subdev.entity);
	cio2_fbpt_exit(q, &cio2->pci_dev->dev);
	mutex_destroy(&q->lock);
}
- static int cio2_queues_init(struct cio2_device *cio2)
- {
- int i, r;
- for (i = 0; i < CIO2_QUEUES; i++) {
- r = cio2_queue_init(cio2, &cio2->queue[i]);
- if (r)
- break;
- }
- if (i == CIO2_QUEUES)
- return 0;
- for (i--; i >= 0; i--)
- cio2_queue_exit(cio2, &cio2->queue[i]);
- return r;
- }
/* Tear down every queue. */
static void cio2_queues_exit(struct cio2_device *cio2)
{
	unsigned int i;

	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
}
- /**************** PCI interface ****************/
- static int cio2_pci_config_setup(struct pci_dev *dev)
- {
- u16 pci_command;
- int r = pci_enable_msi(dev);
- if (r) {
- dev_err(&dev->dev, "failed to enable MSI (%d)\n", r);
- return r;
- }
- pci_read_config_word(dev, PCI_COMMAND, &pci_command);
- pci_command |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
- PCI_COMMAND_INTX_DISABLE;
- pci_write_config_word(dev, PCI_COMMAND, pci_command);
- return 0;
- }
/*
 * PCI probe: map BARs, set up DMA/MSI, register media/V4L2 devices,
 * create the capture queues and the async sensor notifier, then request
 * the IRQ and enable runtime PM.
 */
static int cio2_pci_probe(struct pci_dev *pci_dev,
			  const struct pci_device_id *id)
{
	struct cio2_device *cio2;
	void __iomem *const *iomap;
	int r;

	cio2 = devm_kzalloc(&pci_dev->dev, sizeof(*cio2), GFP_KERNEL);
	if (!cio2)
		return -ENOMEM;
	cio2->pci_dev = pci_dev;

	r = pcim_enable_device(pci_dev);
	if (r) {
		dev_err(&pci_dev->dev, "failed to enable device (%d)\n", r);
		return r;
	}

	dev_info(&pci_dev->dev, "device 0x%x (rev: 0x%x)\n",
		 pci_dev->device, pci_dev->revision);

	/* Managed mapping of the single register BAR. */
	r = pcim_iomap_regions(pci_dev, 1 << CIO2_PCI_BAR, pci_name(pci_dev));
	if (r) {
		dev_err(&pci_dev->dev, "failed to remap I/O memory (%d)\n", r);
		return -ENODEV;
	}

	iomap = pcim_iomap_table(pci_dev);
	if (!iomap) {
		dev_err(&pci_dev->dev, "failed to iomap table\n");
		return -ENODEV;
	}

	cio2->base = iomap[CIO2_PCI_BAR];

	pci_set_drvdata(pci_dev, cio2);

	pci_set_master(pci_dev);

	r = pci_set_dma_mask(pci_dev, CIO2_DMA_MASK);
	if (r) {
		dev_err(&pci_dev->dev, "failed to set DMA mask (%d)\n", r);
		return -ENODEV;
	}

	r = cio2_pci_config_setup(pci_dev);
	if (r)
		return -ENODEV;

	/* Dummy page/FBPT keep the DMA engine fed when no buffer is set. */
	r = cio2_fbpt_init_dummy(cio2);
	if (r)
		return r;

	mutex_init(&cio2->lock);

	cio2->media_dev.dev = &cio2->pci_dev->dev;
	strlcpy(cio2->media_dev.model, CIO2_DEVICE_NAME,
		sizeof(cio2->media_dev.model));
	snprintf(cio2->media_dev.bus_info, sizeof(cio2->media_dev.bus_info),
		 "PCI:%s", pci_name(cio2->pci_dev));
	cio2->media_dev.hw_revision = 0;

	media_device_init(&cio2->media_dev);
	r = media_device_register(&cio2->media_dev);
	if (r < 0)
		goto fail_mutex_destroy;

	cio2->v4l2_dev.mdev = &cio2->media_dev;
	r = v4l2_device_register(&pci_dev->dev, &cio2->v4l2_dev);
	if (r) {
		dev_err(&pci_dev->dev,
			"failed to register V4L2 device (%d)\n", r);
		goto fail_media_device_unregister;
	}

	r = cio2_queues_init(cio2);
	if (r)
		goto fail_v4l2_device_unregister;

	/* Register notifier for subdevices we care */
	r = cio2_notifier_init(cio2);
	if (r)
		goto fail_cio2_queue_exit;

	r = devm_request_irq(&pci_dev->dev, pci_dev->irq, cio2_irq,
			     IRQF_SHARED, CIO2_NAME, cio2);
	if (r) {
		dev_err(&pci_dev->dev, "failed to request IRQ (%d)\n", r);
		goto fail;
	}

	/*
	 * Drop the usage count taken by the PCI core so the device can
	 * runtime-suspend, and allow userspace-controlled runtime PM.
	 */
	pm_runtime_put_noidle(&pci_dev->dev);
	pm_runtime_allow(&pci_dev->dev);

	return 0;

	/* Unwind in reverse order of the registrations above. */
fail:
	cio2_notifier_exit(cio2);
fail_cio2_queue_exit:
	cio2_queues_exit(cio2);
fail_v4l2_device_unregister:
	v4l2_device_unregister(&cio2->v4l2_dev);
fail_media_device_unregister:
	media_device_unregister(&cio2->media_dev);
	media_device_cleanup(&cio2->media_dev);
fail_mutex_destroy:
	mutex_destroy(&cio2->lock);
	cio2_fbpt_exit_dummy(cio2);

	return r;
}
/* PCI remove: tear everything down in reverse order of probe. */
static void cio2_pci_remove(struct pci_dev *pci_dev)
{
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	unsigned int i;

	media_device_unregister(&cio2->media_dev);
	cio2_notifier_exit(cio2);
	for (i = 0; i < CIO2_QUEUES; i++)
		cio2_queue_exit(cio2, &cio2->queue[i]);
	cio2_fbpt_exit_dummy(cio2);
	v4l2_device_unregister(&cio2->v4l2_dev);
	media_device_cleanup(&cio2->media_dev);
	mutex_destroy(&cio2->lock);
}
/* Runtime suspend: request device D0i3, then move the function to D3. */
static int __maybe_unused cio2_runtime_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_I3, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime suspend.\n");

	/* Set the D3 power state in the PM control/status register. */
	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pm |= CIO2_PMCSR_D3;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}
/* Runtime resume: exit D0i3 and clear the power-state bits (back to D0). */
static int __maybe_unused cio2_runtime_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	void __iomem *const base = cio2->base;
	u16 pm;

	writel(CIO2_D0I3C_RR, base + CIO2_REG_D0I3C);
	dev_dbg(dev, "cio2 runtime resume.\n");

	/* Clear the D0/D3 field, selecting D0. */
	pci_read_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, &pm);
	pm = (pm >> CIO2_PMCSR_D0D3_SHIFT) << CIO2_PMCSR_D0D3_SHIFT;
	pci_write_config_word(pci_dev, pci_dev->pm_cap + CIO2_PMCSR_OFFSET, pm);

	return 0;
}
- /*
- * Helper function to advance all the elements of a circular buffer by "start"
- * positions
- */
- static void arrange(void *ptr, size_t elem_size, size_t elems, size_t start)
- {
- struct {
- size_t begin, end;
- } arr[2] = {
- { 0, start - 1 },
- { start, elems - 1 },
- };
- #define CHUNK_SIZE(a) ((a)->end - (a)->begin + 1)
- /* Loop as long as we have out-of-place entries */
- while (CHUNK_SIZE(&arr[0]) && CHUNK_SIZE(&arr[1])) {
- size_t size0, i;
- /*
- * Find the number of entries that can be arranged on this
- * iteration.
- */
- size0 = min(CHUNK_SIZE(&arr[0]), CHUNK_SIZE(&arr[1]));
- /* Swap the entries in two parts of the array. */
- for (i = 0; i < size0; i++) {
- u8 *d = ptr + elem_size * (arr[1].begin + i);
- u8 *s = ptr + elem_size * (arr[0].begin + i);
- size_t j;
- for (j = 0; j < elem_size; j++)
- swap(d[j], s[j]);
- }
- if (CHUNK_SIZE(&arr[0]) > CHUNK_SIZE(&arr[1])) {
- /* The end of the first array remains unarranged. */
- arr[0].begin += size0;
- } else {
- /*
- * The first array is fully arranged so we proceed
- * handling the next one.
- */
- arr[0].begin = arr[1].begin;
- arr[0].end = arr[1].begin + size0 - 1;
- arr[1].begin += size0;
- }
- }
- }
/*
 * Rotate the FBPT and bufs[] arrays so the oldest queued buffer lands at
 * index 0, then re-validate all FBPT entries. Used around system suspend.
 */
static void cio2_fbpt_rearrange(struct cio2_device *cio2, struct cio2_queue *q)
{
	unsigned int i, j;

	/* Find the oldest queued buffer, searching from bufs_first. */
	for (i = 0, j = q->bufs_first; i < CIO2_MAX_BUFFERS;
		i++, j = (j + 1) % CIO2_MAX_BUFFERS)
		if (q->bufs[j])
			break;

	/* Nothing queued: nothing to rotate. */
	if (i == CIO2_MAX_BUFFERS)
		return;

	if (j) {
		/* One FBPT "element" is CIO2_MAX_LOPS entries wide. */
		arrange(q->fbpt, sizeof(struct cio2_fbpt_entry) * CIO2_MAX_LOPS,
			CIO2_MAX_BUFFERS, j);
		arrange(q->bufs, sizeof(struct cio2_buffer *),
			CIO2_MAX_BUFFERS, j);
	}

	/*
	 * DMA clears the valid bit when accessing the buffer.
	 * When stopping stream in suspend callback, some of the buffers
	 * may be in invalid state. After resume, when DMA meets the invalid
	 * buffer, it will halt and stop receiving new data.
	 * To avoid DMA halting, set the valid bit for all buffers in FBPT.
	 */
	for (i = 0; i < CIO2_MAX_BUFFERS; i++)
		cio2_fbpt_entry_enable(cio2, q->fbpt + i * CIO2_MAX_LOPS);
}
/* System suspend: stop the active stream and park buffers at FBPT head. */
static int __maybe_unused cio2_suspend(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 suspend\n");
	if (!cio2->streaming)
		return 0;

	/* Stop stream */
	cio2_hw_exit(cio2, q);
	/* Wait for any in-flight ISR before powering down. */
	synchronize_irq(pci_dev->irq);

	pm_runtime_force_suspend(dev);

	/*
	 * Upon resume, hw starts to process the fbpt entries from beginning,
	 * so relocate the queued buffs to the fbpt head before suspend.
	 */
	cio2_fbpt_rearrange(cio2, q);
	q->bufs_first = 0;
	q->bufs_next = 0;

	return 0;
}
/* System resume: power back up and restart the interrupted stream. */
static int __maybe_unused cio2_resume(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);
	struct cio2_device *cio2 = pci_get_drvdata(pci_dev);
	int r = 0;
	struct cio2_queue *q = cio2->cur_queue;

	dev_dbg(dev, "cio2 resume\n");
	if (!cio2->streaming)
		return 0;

	/* Start stream */
	r = pm_runtime_force_resume(&cio2->pci_dev->dev);
	if (r < 0) {
		dev_err(&cio2->pci_dev->dev,
			"failed to set power %d\n", r);
		return r;
	}

	r = cio2_hw_init(cio2, q);
	if (r)
		dev_err(dev, "fail to init cio2 hw\n");

	return r;
}
/* Runtime PM drives D0i3/D3; system sleep stops and restarts streaming. */
static const struct dev_pm_ops cio2_pm_ops = {
	SET_RUNTIME_PM_OPS(&cio2_runtime_suspend, &cio2_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(&cio2_suspend, &cio2_resume)
};

static const struct pci_device_id cio2_pci_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, CIO2_PCI_ID) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, cio2_pci_id_table);

static struct pci_driver cio2_pci_driver = {
	.name = CIO2_NAME,
	.id_table = cio2_pci_id_table,
	.probe = cio2_pci_probe,
	.remove = cio2_pci_remove,
	.driver = {
		.pm = &cio2_pm_ops,
	},
};

module_pci_driver(cio2_pci_driver);

MODULE_AUTHOR("Tuukka Toivonen <tuukka.toivonen@intel.com>");
MODULE_AUTHOR("Tianshu Qiu <tian.shu.qiu@intel.com>");
MODULE_AUTHOR("Jian Xu Zheng <jian.xu.zheng@intel.com>");
MODULE_AUTHOR("Yuning Pu <yuning.pu@intel.com>");
MODULE_AUTHOR("Yong Zhi <yong.zhi@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("IPU3 CIO2 driver");
|