- /*
- *
- * (C) COPYRIGHT 2010-2016 ARM Limited. All rights reserved.
- *
- * This program is free software and is provided to you under the terms of the
- * GNU General Public License version 2 as published by the Free Software
- * Foundation, and any use by you of this program is subject to the terms
- * of such GNU licence.
- *
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
- *
- */
- /**
- * @file mali_kbase_mem.c
- * Base kernel memory APIs
- */
- #ifdef CONFIG_DMA_SHARED_BUFFER
- #include <linux/dma-buf.h>
- #endif /* CONFIG_DMA_SHARED_BUFFER */
- #ifdef CONFIG_UMP
- #include <linux/ump.h>
- #endif /* CONFIG_UMP */
- #include <linux/kernel.h>
- #include <linux/bug.h>
- #include <linux/compat.h>
- #include <linux/sched/mm.h>
- #include <mali_kbase_config.h>
- #include <mali_kbase.h>
- #include <mali_midg_regmap.h>
- #include <mali_kbase_cache_policy.h>
- #include <mali_kbase_hw.h>
- #include <mali_kbase_hwaccess_time.h>
- #include <mali_kbase_tlstream.h>
- /**
- * @brief Check the zone compatibility of two regions.
- */
- static int kbase_region_tracker_match_zone(struct kbase_va_region *reg1,
- struct kbase_va_region *reg2)
- {
- return ((reg1->flags & KBASE_REG_ZONE_MASK) ==
- (reg2->flags & KBASE_REG_ZONE_MASK));
- }
- /* This function inserts a region into the tree. */
- static void kbase_region_tracker_insert(struct kbase_context *kctx, struct kbase_va_region *new_reg)
- {
- u64 start_pfn = new_reg->start_pfn;
- struct rb_node **link = &(kctx->reg_rbtree.rb_node);
- struct rb_node *parent = NULL;
- /* Find the right place in the tree using tree search */
- while (*link) {
- struct kbase_va_region *old_reg;
- parent = *link;
- old_reg = rb_entry(parent, struct kbase_va_region, rblink);
- /* RBTree requires no duplicate entries. */
- KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
- if (old_reg->start_pfn > start_pfn)
- link = &(*link)->rb_left;
- else
- link = &(*link)->rb_right;
- }
- /* Put the new node there, and rebalance tree */
- rb_link_node(&(new_reg->rblink), parent, link);
- rb_insert_color(&(new_reg->rblink), &(kctx->reg_rbtree));
- }
- /* Find allocated region enclosing free range. */
- static struct kbase_va_region *kbase_region_tracker_find_region_enclosing_range_free(
- struct kbase_context *kctx, u64 start_pfn, size_t nr_pages)
- {
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
- u64 end_pfn = start_pfn + nr_pages;
- rbnode = kctx->reg_rbtree.rb_node;
- while (rbnode) {
- u64 tmp_start_pfn, tmp_end_pfn;
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- tmp_start_pfn = reg->start_pfn;
- tmp_end_pfn = reg->start_pfn + reg->nr_pages;
- /* If start is lower than this, go left. */
- if (start_pfn < tmp_start_pfn)
- rbnode = rbnode->rb_left;
- /* If end is higher than this, then go right. */
- else if (end_pfn > tmp_end_pfn)
- rbnode = rbnode->rb_right;
- else /* Enclosing */
- return reg;
- }
- return NULL;
- }
- /* Find region enclosing given address. */
- struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr)
- {
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- lockdep_assert_held(&kctx->reg_lock);
- rbnode = kctx->reg_rbtree.rb_node;
- while (rbnode) {
- u64 tmp_start_pfn, tmp_end_pfn;
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- tmp_start_pfn = reg->start_pfn;
- tmp_end_pfn = reg->start_pfn + reg->nr_pages;
- /* If start is lower than this, go left. */
- if (gpu_pfn < tmp_start_pfn)
- rbnode = rbnode->rb_left;
- /* If end is higher than this, then go right. */
- else if (gpu_pfn >= tmp_end_pfn)
- rbnode = rbnode->rb_right;
- else /* Enclosing */
- return reg;
- }
- return NULL;
- }
- /* Find region with given base address */
- struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr)
- {
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- lockdep_assert_held(&kctx->reg_lock);
- rbnode = kctx->reg_rbtree.rb_node;
- while (rbnode) {
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- if (reg->start_pfn > gpu_pfn)
- rbnode = rbnode->rb_left;
- else if (reg->start_pfn < gpu_pfn)
- rbnode = rbnode->rb_right;
- else
- return reg;
- }
- return NULL;
- }
- /* Find region meeting given requirements */
- static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(struct kbase_context *kctx, struct kbase_va_region *reg_reqs, size_t nr_pages, size_t align)
- {
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
- /* Note that this is a linear search, as we do not have a target
- address in mind, so it does not benefit from the rbtree search */
- rbnode = rb_first(&(kctx->reg_rbtree));
- while (rbnode) {
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- if ((reg->nr_pages >= nr_pages) &&
- (reg->flags & KBASE_REG_FREE) &&
- kbase_region_tracker_match_zone(reg, reg_reqs)) {
- /* Check alignment */
- u64 start_pfn = (reg->start_pfn + align - 1) & ~(align - 1);
- if ((start_pfn >= reg->start_pfn) &&
- (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
- ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1)))
- return reg;
- }
- rbnode = rb_next(rbnode);
- }
- return NULL;
- }
- /**
- * @brief Remove a region object from the global list.
- *
- * The region reg is removed, possibly by merging with other free and
- * compatible adjacent regions. It must be called with the context
- * region lock held. The associated memory is not released (see
- * kbase_free_alloced_region). Internal use only.
- */
- static int kbase_remove_va_region(struct kbase_context *kctx, struct kbase_va_region *reg)
- {
- struct rb_node *rbprev;
- struct kbase_va_region *prev = NULL;
- struct rb_node *rbnext;
- struct kbase_va_region *next = NULL;
- int merged_front = 0;
- int merged_back = 0;
- int err = 0;
- /* Try to merge with the previous block first */
- rbprev = rb_prev(&(reg->rblink));
- if (rbprev) {
- prev = rb_entry(rbprev, struct kbase_va_region, rblink);
- if ((prev->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(prev, reg)) {
- /* We're compatible with the previous VMA, merge with it */
- prev->nr_pages += reg->nr_pages;
- rb_erase(&(reg->rblink), &kctx->reg_rbtree);
- reg = prev;
- merged_front = 1;
- }
- }
- /* Try to merge with the next block second */
- /* Note we do the lookup here as the tree may have been rebalanced. */
- rbnext = rb_next(&(reg->rblink));
- if (rbnext) {
- /* We're compatible with the next VMA, merge with it */
- next = rb_entry(rbnext, struct kbase_va_region, rblink);
- if ((next->flags & KBASE_REG_FREE) && kbase_region_tracker_match_zone(next, reg)) {
- next->start_pfn = reg->start_pfn;
- next->nr_pages += reg->nr_pages;
- rb_erase(&(reg->rblink), &kctx->reg_rbtree);
- merged_back = 1;
- if (merged_front) {
- /* We already merged with prev, free it */
- kbase_free_alloced_region(reg);
- }
- }
- }
- /* If we failed to merge then we need to add a new block */
- if (!(merged_front || merged_back)) {
- /*
- * We didn't merge anything. Add a new free
- * placeholder and remove the original one.
- */
- struct kbase_va_region *free_reg;
- free_reg = kbase_alloc_free_region(kctx, reg->start_pfn, reg->nr_pages, reg->flags & KBASE_REG_ZONE_MASK);
- if (!free_reg) {
- err = -ENOMEM;
- goto out;
- }
- rb_replace_node(&(reg->rblink), &(free_reg->rblink), &(kctx->reg_rbtree));
- }
- out:
- return err;
- }
- /**
- * @brief Insert a VA region to the list, replacing the current at_reg.
- */
- static int kbase_insert_va_region_nolock(struct kbase_context *kctx, struct kbase_va_region *new_reg, struct kbase_va_region *at_reg, u64 start_pfn, size_t nr_pages)
- {
- int err = 0;
- /* Must be a free region */
- KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
- /* start_pfn should be contained within at_reg */
- KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
- /* at least nr_pages from start_pfn should be contained within at_reg */
- KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
- new_reg->start_pfn = start_pfn;
- new_reg->nr_pages = nr_pages;
- /* New region covers the old one exactly, so swap them and delete the old one. */
- if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
- rb_replace_node(&(at_reg->rblink), &(new_reg->rblink), &(kctx->reg_rbtree));
- kbase_free_alloced_region(at_reg);
- }
- /* New region replaces the start of the old one, so insert before. */
- else if (at_reg->start_pfn == start_pfn) {
- at_reg->start_pfn += nr_pages;
- KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
- at_reg->nr_pages -= nr_pages;
- kbase_region_tracker_insert(kctx, new_reg);
- }
- /* New region replaces the end of the old one, so insert after. */
- else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
- at_reg->nr_pages -= nr_pages;
- kbase_region_tracker_insert(kctx, new_reg);
- }
- /* New region splits the old one, so insert and create new */
- else {
- struct kbase_va_region *new_front_reg;
- new_front_reg = kbase_alloc_free_region(kctx,
- at_reg->start_pfn,
- start_pfn - at_reg->start_pfn,
- at_reg->flags & KBASE_REG_ZONE_MASK);
- if (new_front_reg) {
- at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
- at_reg->start_pfn = start_pfn + nr_pages;
- kbase_region_tracker_insert(kctx, new_front_reg);
- kbase_region_tracker_insert(kctx, new_reg);
- } else {
- err = -ENOMEM;
- }
- }
- return err;
- }
- /**
- * @brief Add a VA region to the list.
- */
- int kbase_add_va_region(struct kbase_context *kctx,
- struct kbase_va_region *reg, u64 addr,
- size_t nr_pages, size_t align)
- {
- struct kbase_va_region *tmp;
- u64 gpu_pfn = addr >> PAGE_SHIFT;
- int err = 0;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- KBASE_DEBUG_ASSERT(reg != NULL);
- lockdep_assert_held(&kctx->reg_lock);
- if (!align)
- align = 1;
- /* must be a power of 2 */
- KBASE_DEBUG_ASSERT((align & (align - 1)) == 0);
- KBASE_DEBUG_ASSERT(nr_pages > 0);
- /* Path 1: Map a specific address. Find the enclosing region, which *must* be free. */
- if (gpu_pfn) {
- struct device *dev = kctx->kbdev->dev;
- KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
- tmp = kbase_region_tracker_find_region_enclosing_range_free(kctx, gpu_pfn, nr_pages);
- if (!tmp) {
- dev_warn(dev, "Enclosing region not found: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
- err = -ENOMEM;
- goto exit;
- }
- if ((!kbase_region_tracker_match_zone(tmp, reg)) ||
- (!(tmp->flags & KBASE_REG_FREE))) {
- dev_warn(dev, "Zone mismatch: %lu != %lu", tmp->flags & KBASE_REG_ZONE_MASK, reg->flags & KBASE_REG_ZONE_MASK);
- dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n", tmp->start_pfn, tmp->flags, tmp->nr_pages, gpu_pfn, nr_pages);
- dev_warn(dev, "in function %s (%p, %p, 0x%llx, 0x%zx, 0x%zx)\n", __func__, kctx, reg, addr, nr_pages, align);
- err = -ENOMEM;
- goto exit;
- }
- err = kbase_insert_va_region_nolock(kctx, reg, tmp, gpu_pfn, nr_pages);
- if (err) {
- dev_warn(dev, "Failed to insert va region");
- err = -ENOMEM;
- goto exit;
- }
- goto exit;
- }
- /* Path 2: Map any free address which meets the requirements. */
- {
- u64 start_pfn;
- /*
- * Depending on the zone the allocation request is for
- * we might need to retry it.
- */
- do {
- tmp = kbase_region_tracker_find_region_meeting_reqs(
- kctx, reg, nr_pages, align);
- if (tmp) {
- start_pfn = (tmp->start_pfn + align - 1) &
- ~(align - 1);
- err = kbase_insert_va_region_nolock(kctx, reg,
- tmp, start_pfn, nr_pages);
- break;
- }
- /*
- * If the allocation is not from the same zone as JIT
- * then don't retry, we're out of VA and there is
- * nothing which can be done about it.
- */
- if ((reg->flags & KBASE_REG_ZONE_MASK) !=
- KBASE_REG_ZONE_CUSTOM_VA)
- break;
- } while (kbase_jit_evict(kctx));
- if (!tmp)
- err = -ENOMEM;
- }
- exit:
- return err;
- }
- /**
- * @brief Initialize the internal region tracker data structure.
- */
- static void kbase_region_tracker_ds_init(struct kbase_context *kctx,
- struct kbase_va_region *same_va_reg,
- struct kbase_va_region *exec_reg,
- struct kbase_va_region *custom_va_reg)
- {
- kctx->reg_rbtree = RB_ROOT;
- kbase_region_tracker_insert(kctx, same_va_reg);
- /* exec_reg and custom_va_reg don't always exist */
- if (exec_reg && custom_va_reg) {
- kbase_region_tracker_insert(kctx, exec_reg);
- kbase_region_tracker_insert(kctx, custom_va_reg);
- }
- }
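- /* Free every region still tracked in the context's region rbtree at teardown. */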
- void kbase_region_tracker_term(struct kbase_context *kctx)
- {
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
- do {
- rbnode = rb_first(&(kctx->reg_rbtree));
- if (rbnode) {
- rb_erase(rbnode, &(kctx->reg_rbtree));
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- kbase_free_alloced_region(reg);
- }
- } while (rbnode);
- }
- /**
- * Initialize the region tracker data structure.
- */
- int kbase_region_tracker_init(struct kbase_context *kctx)
- {
- struct kbase_va_region *same_va_reg;
- struct kbase_va_region *exec_reg = NULL;
- struct kbase_va_region *custom_va_reg = NULL;
- size_t same_va_bits = sizeof(void *) * BITS_PER_BYTE;
- u64 custom_va_size = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
- u64 gpu_va_limit = (1ULL << kctx->kbdev->gpu_props.mmu.va_bits) >> PAGE_SHIFT;
- u64 same_va_pages;
- int err;
- /* Take the lock as kbase_free_alloced_region requires it */
- kbase_gpu_vm_lock(kctx);
- #if defined(CONFIG_ARM64)
- same_va_bits = VA_BITS;
- #elif defined(CONFIG_X86_64)
- same_va_bits = 47;
- #elif defined(CONFIG_64BIT)
- #error Unsupported 64-bit architecture
- #endif
- #ifdef CONFIG_64BIT
- if (kctx->is_compat)
- same_va_bits = 32;
- else if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
- same_va_bits = 33;
- #endif
- if (kctx->kbdev->gpu_props.mmu.va_bits < same_va_bits) {
- err = -EINVAL;
- goto fail_unlock;
- }
- same_va_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
- /* all have SAME_VA */
- same_va_reg = kbase_alloc_free_region(kctx, 1,
- same_va_pages,
- KBASE_REG_ZONE_SAME_VA);
- if (!same_va_reg) {
- err = -ENOMEM;
- goto fail_unlock;
- }
- #ifdef CONFIG_64BIT
- /* 32-bit clients have exec and custom VA zones */
- if (kctx->is_compat) {
- #endif
- if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE) {
- err = -EINVAL;
- goto fail_free_same_va;
- }
- /* If the current size of TMEM is out of range of the
- * virtual address space addressable by the MMU then
- * we should shrink it to fit
- */
- if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
- custom_va_size = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
- exec_reg = kbase_alloc_free_region(kctx,
- KBASE_REG_ZONE_EXEC_BASE,
- KBASE_REG_ZONE_EXEC_SIZE,
- KBASE_REG_ZONE_EXEC);
- if (!exec_reg) {
- err = -ENOMEM;
- goto fail_free_same_va;
- }
- custom_va_reg = kbase_alloc_free_region(kctx,
- KBASE_REG_ZONE_CUSTOM_VA_BASE,
- custom_va_size, KBASE_REG_ZONE_CUSTOM_VA);
- if (!custom_va_reg) {
- err = -ENOMEM;
- goto fail_free_exec;
- }
- #ifdef CONFIG_64BIT
- }
- #endif
- kbase_region_tracker_ds_init(kctx, same_va_reg, exec_reg, custom_va_reg);
- kctx->same_va_end = same_va_pages + 1;
- kbase_gpu_vm_unlock(kctx);
- return 0;
- fail_free_exec:
- kbase_free_alloced_region(exec_reg);
- fail_free_same_va:
- kbase_free_alloced_region(same_va_reg);
- fail_unlock:
- kbase_gpu_vm_unlock(kctx);
- return err;
- }
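- /* Carve a custom VA zone for JIT out of the end of the SAME_VA zone (64-bit clients only; 32-bit clients reuse the existing custom VA zone). */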
- int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages)
- {
- #ifdef CONFIG_64BIT
- struct kbase_va_region *same_va;
- struct kbase_va_region *custom_va_reg;
- u64 same_va_bits;
- u64 total_va_size;
- int err;
- /*
- * Nothing to do for 32-bit clients, JIT uses the existing
- * custom VA zone.
- */
- if (kctx->is_compat)
- return 0;
- #if defined(CONFIG_ARM64)
- same_va_bits = VA_BITS;
- #elif defined(CONFIG_X86_64)
- same_va_bits = 47;
- #elif defined(CONFIG_64BIT)
- #error Unsupported 64-bit architecture
- #endif
- if (kbase_hw_has_feature(kctx->kbdev, BASE_HW_FEATURE_33BIT_VA))
- same_va_bits = 33;
- total_va_size = (1ULL << (same_va_bits - PAGE_SHIFT)) - 1;
- kbase_gpu_vm_lock(kctx);
- /*
- * Modify the same VA free region after creation. Be careful to ensure
- * that allocations haven't been made as they could cause an overlap
- * to happen with existing same VA allocations and the custom VA zone.
- */
- same_va = kbase_region_tracker_find_region_base_address(kctx,
- PAGE_SIZE);
- if (!same_va) {
- err = -ENOMEM;
- goto fail_unlock;
- }
- /* The region flag or region size has changed since creation so bail. */
- if ((!(same_va->flags & KBASE_REG_FREE)) ||
- (same_va->nr_pages != total_va_size)) {
- err = -ENOMEM;
- goto fail_unlock;
- }
- /* It's safe to adjust the same VA zone now */
- same_va->nr_pages -= jit_va_pages;
- kctx->same_va_end -= jit_va_pages;
- /*
- * Create a custom VA zone at the end of the VA for allocations which
- * JIT can use so it doesn't have to allocate VA from the kernel.
- */
- custom_va_reg = kbase_alloc_free_region(kctx,
- kctx->same_va_end,
- jit_va_pages,
- KBASE_REG_ZONE_CUSTOM_VA);
- if (!custom_va_reg) {
- /*
- * The context will be destroyed if we fail here so no point
- * reverting the change we made to same_va.
- */
- err = -ENOMEM;
- goto fail_unlock;
- }
- kbase_region_tracker_insert(kctx, custom_va_reg);
- kbase_gpu_vm_unlock(kctx);
- return 0;
- fail_unlock:
- kbase_gpu_vm_unlock(kctx);
- return err;
- #else
- return 0;
- #endif
- }
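- /* Initialise the device-wide memory usage counter and memory pool. */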
- int kbase_mem_init(struct kbase_device *kbdev)
- {
- struct kbasep_mem_device *memdev;
- KBASE_DEBUG_ASSERT(kbdev);
- memdev = &kbdev->memdev;
- kbdev->mem_pool_max_size_default = KBASE_MEM_POOL_MAX_SIZE_KCTX;
- /* Initialize memory usage */
- atomic_set(&memdev->used_pages, 0);
- return kbase_mem_pool_init(&kbdev->mem_pool,
- KBASE_MEM_POOL_MAX_SIZE_KBDEV, kbdev, NULL);
- }
- void kbase_mem_halt(struct kbase_device *kbdev)
- {
- CSTD_UNUSED(kbdev);
- }
- void kbase_mem_term(struct kbase_device *kbdev)
- {
- struct kbasep_mem_device *memdev;
- int pages;
- KBASE_DEBUG_ASSERT(kbdev);
- memdev = &kbdev->memdev;
- pages = atomic_read(&memdev->used_pages);
- if (pages != 0)
- dev_warn(kbdev->dev, "%s: %d pages in use!\n", __func__, pages);
- kbase_mem_pool_term(&kbdev->mem_pool);
- }
- /**
- * @brief Allocate a free region object.
- *
- * The allocated object is not part of any list yet, and is flagged as
- * KBASE_REG_FREE. No mapping is allocated yet.
- *
- * zone is KBASE_REG_ZONE_CUSTOM_VA, KBASE_REG_ZONE_SAME_VA, or KBASE_REG_ZONE_EXEC
- *
- */
- struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone)
- {
- struct kbase_va_region *new_reg;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- /* zone argument should only contain zone related region flags */
- KBASE_DEBUG_ASSERT((zone & ~KBASE_REG_ZONE_MASK) == 0);
- KBASE_DEBUG_ASSERT(nr_pages > 0);
- /* 64-bit address range is the max */
- KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE));
- new_reg = kzalloc(sizeof(*new_reg), GFP_KERNEL);
- if (!new_reg)
- return NULL;
- new_reg->cpu_alloc = NULL; /* no alloc bound yet */
- new_reg->gpu_alloc = NULL; /* no alloc bound yet */
- new_reg->kctx = kctx;
- new_reg->flags = zone | KBASE_REG_FREE;
- new_reg->flags |= KBASE_REG_GROWABLE;
- new_reg->start_pfn = start_pfn;
- new_reg->nr_pages = nr_pages;
- return new_reg;
- }
- /**
- * @brief Free a region object.
- *
- * The described region must be freed of any mapping.
- *
- * If the region is not flagged as KBASE_REG_FREE, the region's
- * alloc object will be released.
- * It is a bug if no alloc object exists for non-free regions.
- *
- */
- void kbase_free_alloced_region(struct kbase_va_region *reg)
- {
- if (!(reg->flags & KBASE_REG_FREE)) {
- /*
- * The physical allocation should have been removed from the
- * eviction list before this function is called. However, in the
- * case of abnormal process termination or the app leaking the
- * memory kbase_mem_free_region is not called so it can still be
- * on the list at termination time of the region tracker.
- */
- if (!list_empty(&reg->gpu_alloc->evict_node)) {
- /*
- * Unlink the physical allocation before unmaking it
- * evictable so that the allocation isn't grown back to
- * its last backed size as we're going to unmap it
- * anyway.
- */
- reg->cpu_alloc->reg = NULL;
- if (reg->cpu_alloc != reg->gpu_alloc)
- reg->gpu_alloc->reg = NULL;
- /*
- * If a region has been made evictable then we must
- * unmake it before trying to free it.
- * If the memory hasn't been reclaimed it will be
- * unmapped and freed below, if it has been reclaimed
- * then the operations below are no-ops.
- */
- if (reg->flags & KBASE_REG_DONT_NEED) {
- KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
- KBASE_MEM_TYPE_NATIVE);
- kbase_mem_evictable_unmake(reg->gpu_alloc);
- }
- }
- /*
- * Remove the region from the sticky resource metadata
- * list should it be there.
- */
- kbase_sticky_resource_release(reg->kctx, NULL,
- reg->start_pfn << PAGE_SHIFT);
- kbase_mem_phy_alloc_put(reg->cpu_alloc);
- kbase_mem_phy_alloc_put(reg->gpu_alloc);
- /* To detect use-after-free in debug builds */
- KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
- }
- kfree(reg);
- }
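- /* Program the GPU MMU for this context; only valid while the context is scheduled into an address space (as_nr is valid). */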
- void kbase_mmu_update(struct kbase_context *kctx)
- {
- KBASE_DEBUG_ASSERT(kctx != NULL);
- lockdep_assert_held(&kctx->kbdev->js_data.runpool_irq.lock);
- /* ASSERT that the context has a valid as_nr, which is only the case
- * when it's scheduled in.
- *
- * as_nr won't change because the caller has the runpool_irq lock */
- KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- lockdep_assert_held(&kctx->kbdev->as[kctx->as_nr].transaction_mutex);
- kctx->kbdev->mmu_mode->update(kctx);
- }
- void kbase_mmu_disable(struct kbase_context *kctx)
- {
- KBASE_DEBUG_ASSERT(kctx != NULL);
- /* ASSERT that the context has a valid as_nr, which is only the case
- * when it's scheduled in.
- *
- * as_nr won't change because the caller has the runpool_irq lock */
- KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
- kctx->kbdev->mmu_mode->disable_as(kctx->kbdev, kctx->as_nr);
- }
- void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
- {
- kbdev->mmu_mode->disable_as(kbdev, as_nr);
- }
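- /* Map a region on the GPU: reserve the VA range, then insert the backing pages (per-stride for alias allocations), unwinding on failure. */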
- int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align)
- {
- int err;
- size_t i = 0;
- unsigned long attr;
- unsigned long mask = ~KBASE_REG_MEMATTR_MASK;
- if ((kctx->kbdev->system_coherency == COHERENCY_ACE) &&
- (reg->flags & KBASE_REG_SHARE_BOTH))
- attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA);
- else
- attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
- KBASE_DEBUG_ASSERT(kctx != NULL);
- KBASE_DEBUG_ASSERT(reg != NULL);
- err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
- if (err)
- return err;
- if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
- u64 stride;
- struct kbase_mem_phy_alloc *alloc;
- alloc = reg->gpu_alloc;
- stride = alloc->imported.alias.stride;
- KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
- for (i = 0; i < alloc->imported.alias.nents; i++) {
- if (alloc->imported.alias.aliased[i].alloc) {
- err = kbase_mmu_insert_pages(kctx,
- reg->start_pfn + (i * stride),
- alloc->imported.alias.aliased[i].alloc->pages + alloc->imported.alias.aliased[i].offset,
- alloc->imported.alias.aliased[i].length,
- reg->flags);
- if (err)
- goto bad_insert;
- kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
- } else {
- err = kbase_mmu_insert_single_page(kctx,
- reg->start_pfn + i * stride,
- page_to_phys(kctx->aliasing_sink_page),
- alloc->imported.alias.aliased[i].length,
- (reg->flags & mask) | attr);
- if (err)
- goto bad_insert;
- }
- }
- } else {
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
- kbase_get_gpu_phy_pages(reg),
- kbase_reg_current_backed_size(reg),
- reg->flags);
- if (err)
- goto bad_insert;
- kbase_mem_phy_alloc_gpu_mapped(reg->gpu_alloc);
- }
- return err;
- bad_insert:
- if (reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
- u64 stride;
- stride = reg->gpu_alloc->imported.alias.stride;
- KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
- while (i--)
- if (reg->gpu_alloc->imported.alias.aliased[i].alloc) {
- kbase_mmu_teardown_pages(kctx, reg->start_pfn + (i * stride), reg->gpu_alloc->imported.alias.aliased[i].length);
- kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
- }
- }
- kbase_remove_va_region(kctx, reg);
- return err;
- }
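- /* Unmap a region from the GPU and release its VA range. */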
- int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
- {
- int err;
- if (reg->start_pfn == 0)
- return 0;
- if (reg->gpu_alloc && reg->gpu_alloc->type == KBASE_MEM_TYPE_ALIAS) {
- size_t i;
- err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, reg->nr_pages);
- KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
- for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
- if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
- kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
- } else {
- err = kbase_mmu_teardown_pages(kctx, reg->start_pfn, kbase_reg_current_backed_size(reg));
- kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
- }
- if (err)
- return err;
- err = kbase_remove_va_region(kctx, reg);
- return err;
- }
- static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping_of_region(const struct kbase_va_region *reg, unsigned long uaddr, size_t size)
- {
- struct kbase_cpu_mapping *map;
- struct list_head *pos;
- KBASE_DEBUG_ASSERT(reg != NULL);
- KBASE_DEBUG_ASSERT(reg->cpu_alloc);
- if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
- return NULL;
- list_for_each(pos, &reg->cpu_alloc->mappings) {
- map = list_entry(pos, struct kbase_cpu_mapping, mappings_list);
- if (map->vm_start <= uaddr && map->vm_end >= uaddr + size)
- return map;
- }
- return NULL;
- }
- int kbasep_find_enclosing_cpu_mapping_offset(
- struct kbase_context *kctx, u64 gpu_addr,
- unsigned long uaddr, size_t size, u64 *offset)
- {
- struct kbase_cpu_mapping *map = NULL;
- const struct kbase_va_region *reg;
- int err = -EINVAL;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- kbase_gpu_vm_lock(kctx);
- reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
- if (reg && !(reg->flags & KBASE_REG_FREE)) {
- map = kbasep_find_enclosing_cpu_mapping_of_region(reg, uaddr,
- size);
- if (map) {
- *offset = (uaddr - PTR_TO_U64(map->vm_start)) +
- (map->page_off << PAGE_SHIFT);
- err = 0;
- }
- }
- kbase_gpu_vm_unlock(kctx);
- return err;
- }
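- /* Sync one page between the CPU and GPU. If the CPU and GPU pages are the same physical page a DMA sync suffices; otherwise the contents are copied between the two pages around the sync. */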
- void kbase_sync_single(struct kbase_context *kctx,
- phys_addr_t cpu_pa, phys_addr_t gpu_pa,
- off_t offset, size_t size, enum kbase_sync_type sync_fn)
- {
- struct page *cpu_page;
- cpu_page = pfn_to_page(PFN_DOWN(cpu_pa));
- if (likely(cpu_pa == gpu_pa)) {
- dma_addr_t dma_addr;
- BUG_ON(!cpu_page);
- BUG_ON(offset + size > PAGE_SIZE);
- dma_addr = kbase_dma_addr(cpu_page) + offset;
- if (sync_fn == KBASE_SYNC_TO_CPU)
- dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr,
- size, DMA_BIDIRECTIONAL);
- else if (sync_fn == KBASE_SYNC_TO_DEVICE)
- dma_sync_single_for_device(kctx->kbdev->dev, dma_addr,
- size, DMA_BIDIRECTIONAL);
- } else {
- void *src = NULL;
- void *dst = NULL;
- struct page *gpu_page;
- if (WARN(!gpu_pa, "No GPU PA found for infinite cache op"))
- return;
- gpu_page = pfn_to_page(PFN_DOWN(gpu_pa));
- if (sync_fn == KBASE_SYNC_TO_DEVICE) {
- src = ((unsigned char *)kmap(cpu_page)) + offset;
- dst = ((unsigned char *)kmap(gpu_page)) + offset;
- } else if (sync_fn == KBASE_SYNC_TO_CPU) {
- dma_sync_single_for_cpu(kctx->kbdev->dev,
- kbase_dma_addr(gpu_page) + offset,
- size, DMA_BIDIRECTIONAL);
- src = ((unsigned char *)kmap(gpu_page)) + offset;
- dst = ((unsigned char *)kmap(cpu_page)) + offset;
- }
- memcpy(dst, src, size);
- kunmap(gpu_page);
- kunmap(cpu_page);
- if (sync_fn == KBASE_SYNC_TO_DEVICE)
- dma_sync_single_for_device(kctx->kbdev->dev,
- kbase_dma_addr(gpu_page) + offset,
- size, DMA_BIDIRECTIONAL);
- }
- }
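- /* Handle a single msync/csync request: look up the region and CPU mapping for the user range, then sync the first, middle and last pages that are backed. */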
- static int kbase_do_syncset(struct kbase_context *kctx,
- struct base_syncset *set, enum kbase_sync_type sync_fn)
- {
- int err = 0;
- struct basep_syncset *sset = &set->basep_sset;
- struct kbase_va_region *reg;
- struct kbase_cpu_mapping *map;
- unsigned long start;
- size_t size;
- phys_addr_t *cpu_pa;
- phys_addr_t *gpu_pa;
- u64 page_off, page_count;
- u64 i;
- off_t offset;
- kbase_os_mem_map_lock(kctx);
- kbase_gpu_vm_lock(kctx);
- /* find the region where the virtual address is contained */
- reg = kbase_region_tracker_find_region_enclosing_address(kctx,
- sset->mem_handle.basep.handle);
- if (!reg) {
- dev_warn(kctx->kbdev->dev, "Can't find region at VA 0x%016llX",
- sset->mem_handle.basep.handle);
- err = -EINVAL;
- goto out_unlock;
- }
- if (!(reg->flags & KBASE_REG_CPU_CACHED))
- goto out_unlock;
- start = (uintptr_t)sset->user_addr;
- size = (size_t)sset->size;
- map = kbasep_find_enclosing_cpu_mapping_of_region(reg, start, size);
- if (!map) {
- dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX",
- start, sset->mem_handle.basep.handle);
- err = -EINVAL;
- goto out_unlock;
- }
- offset = start & (PAGE_SIZE - 1);
- page_off = map->page_off + ((start - map->vm_start) >> PAGE_SHIFT);
- page_count = (size + offset + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
- cpu_pa = kbase_get_cpu_phy_pages(reg);
- gpu_pa = kbase_get_gpu_phy_pages(reg);
- /* Sync first page */
- if (cpu_pa[page_off]) {
- size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
- kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off],
- offset, sz, sync_fn);
- }
- /* Sync middle pages (if any) */
- for (i = 1; page_count > 2 && i < page_count - 1; i++) {
- /* we grow upwards, so bail on first non-present page */
- if (!cpu_pa[page_off + i])
- break;
- kbase_sync_single(kctx, cpu_pa[page_off + i],
- gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn);
- }
- /* Sync last page (if any) */
- if (page_count > 1 && cpu_pa[page_off + page_count - 1]) {
- size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
- kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1],
- gpu_pa[page_off + page_count - 1], 0, sz,
- sync_fn);
- }
- out_unlock:
- kbase_gpu_vm_unlock(kctx);
- kbase_os_mem_map_unlock(kctx);
- return err;
- }
- int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset)
- {
- int err = -EINVAL;
- struct basep_syncset *sset;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- KBASE_DEBUG_ASSERT(syncset != NULL);
- sset = &syncset->basep_sset;
- switch (sset->type) {
- case BASE_SYNCSET_OP_MSYNC:
- err = kbase_do_syncset(kctx, syncset, KBASE_SYNC_TO_DEVICE);
- break;
- case BASE_SYNCSET_OP_CSYNC:
- err = kbase_do_syncset(kctx, syncset, KBASE_SYNC_TO_CPU);
- break;
- default:
- dev_warn(kctx->kbdev->dev, "Unknown msync op %d\n", sset->type);
- break;
- }
- return err;
- }
- /* vm lock must be held */
- int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg)
- {
- int err;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- KBASE_DEBUG_ASSERT(reg != NULL);
- lockdep_assert_held(&kctx->reg_lock);
- /*
- * Unlink the physical allocation before unmaking it evictable so
- * that the allocation isn't grown back to its last backed size
- * as we're going to unmap it anyway.
- */
- reg->cpu_alloc->reg = NULL;
- if (reg->cpu_alloc != reg->gpu_alloc)
- reg->gpu_alloc->reg = NULL;
- /*
- * If a region has been made evictable then we must unmake it
- * before trying to free it.
- * If the memory hasn't been reclaimed it will be unmapped and freed
- * below, if it has been reclaimed then the operations below are no-ops.
- */
- if (reg->flags & KBASE_REG_DONT_NEED) {
- KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
- KBASE_MEM_TYPE_NATIVE);
- kbase_mem_evictable_unmake(reg->gpu_alloc);
- }
- err = kbase_gpu_munmap(kctx, reg);
- if (err) {
- dev_warn(reg->kctx->kbdev->dev, "Could not unmap from the GPU...\n");
- goto out;
- }
- if (kbase_hw_has_issue(kctx->kbdev, BASE_HW_ISSUE_6367)) {
- /* Wait for GPU to flush write buffer before freeing physical pages */
- kbase_wait_write_flush(kctx);
- }
- /* This will also free the physical pages */
- kbase_free_alloced_region(reg);
- out:
- return err;
- }
- /**
- * @brief Free the region from the GPU and unregister it.
- *
- * This function implements the free operation on a memory segment.
- * It will loudly fail if called with outstanding mappings.
- */
- int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
- {
- int err = 0;
- struct kbase_va_region *reg;
- KBASE_DEBUG_ASSERT(kctx != NULL);
- if (gpu_addr == 0) {
- dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
- return -EINVAL;
- }
- kbase_gpu_vm_lock(kctx);
- if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
- gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
- int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
- reg = kctx->pending_regions[cookie];
- if (!reg) {
- err = -EINVAL;
- goto out_unlock;
- }
- /* ask to unlink the cookie as we'll free it */
- kctx->pending_regions[cookie] = NULL;
- kctx->cookies |= (1UL << cookie);
- kbase_free_alloced_region(reg);
- } else {
- /* A real GPU va */
- /* Validate the region */
- reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
- if (!reg || (reg->flags & KBASE_REG_FREE)) {
- dev_warn(kctx->kbdev->dev, "kbase_mem_free called with nonexistent gpu_addr 0x%llX",
- gpu_addr);
- err = -EINVAL;
- goto out_unlock;
- }
- if ((reg->flags & KBASE_REG_ZONE_MASK) == KBASE_REG_ZONE_SAME_VA) {
- /* SAME_VA must be freed through munmap */
- dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
- gpu_addr);
- err = -EINVAL;
- goto out_unlock;
- }
- err = kbase_mem_free_region(kctx, reg);
- }
- out_unlock:
- kbase_gpu_vm_unlock(kctx);
- return err;
- }
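- /* Translate BASE_MEM_* allocation flags from user space into KBASE_REG_* region flags and pick the default MEMATTR index. */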
- void kbase_update_region_flags(struct kbase_context *kctx,
- struct kbase_va_region *reg, unsigned long flags)
- {
- KBASE_DEBUG_ASSERT(reg != NULL);
- KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
- reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
- /* all memory is now growable */
- reg->flags |= KBASE_REG_GROWABLE;
- if (flags & BASE_MEM_GROW_ON_GPF)
- reg->flags |= KBASE_REG_PF_GROW;
- if (flags & BASE_MEM_PROT_CPU_WR)
- reg->flags |= KBASE_REG_CPU_WR;
- if (flags & BASE_MEM_PROT_CPU_RD)
- reg->flags |= KBASE_REG_CPU_RD;
- if (flags & BASE_MEM_PROT_GPU_WR)
- reg->flags |= KBASE_REG_GPU_WR;
- if (flags & BASE_MEM_PROT_GPU_RD)
- reg->flags |= KBASE_REG_GPU_RD;
- if (0 == (flags & BASE_MEM_PROT_GPU_EX))
- reg->flags |= KBASE_REG_GPU_NX;
- if (flags & BASE_MEM_COHERENT_SYSTEM ||
- flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED)
- reg->flags |= KBASE_REG_SHARE_BOTH;
- else if (flags & BASE_MEM_COHERENT_LOCAL)
- reg->flags |= KBASE_REG_SHARE_IN;
- /* Set up default MEMATTR usage */
- if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
- (reg->flags & KBASE_REG_SHARE_BOTH)) {
- reg->flags |=
- KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
- } else {
- reg->flags |=
- KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
- }
- }
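- /* Allocate nr_pages_requested physical pages for a native allocation; page-usage counters are bumped before allocating so the allocation is visible to the OOM killer. */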
- int kbase_alloc_phy_pages_helper(
- struct kbase_mem_phy_alloc *alloc,
- size_t nr_pages_requested)
- {
- int new_page_count __maybe_unused;
- size_t old_page_count = alloc->nents;
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
- KBASE_DEBUG_ASSERT(alloc->imported.kctx);
- if (nr_pages_requested == 0)
- goto done; /*nothing to do*/
- new_page_count = kbase_atomic_add_pages(
- nr_pages_requested, &alloc->imported.kctx->used_pages);
- kbase_atomic_add_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages);
- /* Increase mm counters before we allocate pages so that this
- * allocation is visible to the OOM killer */
- kbase_process_page_usage_inc(alloc->imported.kctx, nr_pages_requested);
- if (kbase_mem_pool_alloc_pages(&alloc->imported.kctx->mem_pool,
- nr_pages_requested, alloc->pages + old_page_count) != 0)
- goto no_alloc;
- /*
- * Request a zone cache update; this scans only the new pages and
- * appends their information to the zone cache. If the update
- * fails then clear the cache so we fall back to doing things
- * page by page.
- */
- if (kbase_zone_cache_update(alloc, old_page_count) != 0)
- kbase_zone_cache_clear(alloc);
- kbase_tlstream_aux_pagesalloc(
- (u32)alloc->imported.kctx->id,
- (u64)new_page_count);
- alloc->nents += nr_pages_requested;
- done:
- return 0;
- no_alloc:
- kbase_process_page_usage_dec(alloc->imported.kctx, nr_pages_requested);
- kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->used_pages);
- kbase_atomic_sub_pages(nr_pages_requested, &alloc->imported.kctx->kbdev->memdev.used_pages);
- return -ENOMEM;
- }
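- /* Free the last nr_pages_to_free pages of a native allocation back to the pool, updating page accounting unless the pages were already reclaimed. */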
- int kbase_free_phy_pages_helper(
- struct kbase_mem_phy_alloc *alloc,
- size_t nr_pages_to_free)
- {
- struct kbase_context *kctx = alloc->imported.kctx;
- bool syncback;
- bool reclaimed = (alloc->evicted != 0);
- phys_addr_t *start_free;
- int new_page_count __maybe_unused;
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
- KBASE_DEBUG_ASSERT(alloc->imported.kctx);
- KBASE_DEBUG_ASSERT(alloc->nents >= nr_pages_to_free);
- /* early out if nothing to do */
- if (nr_pages_to_free == 0)
- return 0;
- start_free = alloc->pages + alloc->nents - nr_pages_to_free;
- syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
- /*
- * Clear the zone cache, we don't expect JIT allocations to be
- * shrunk in parts so there is no point trying to optimize for that
- * by scanning for the changes caused by freeing this memory and
- * updating the existing cache entries.
- */
- kbase_zone_cache_clear(alloc);
- kbase_mem_pool_free_pages(&kctx->mem_pool,
- nr_pages_to_free,
- start_free,
- syncback,
- reclaimed);
- alloc->nents -= nr_pages_to_free;
- /*
- * If the allocation was not evicted (i.e. evicted == 0) then
- * the page accounting needs to be done.
- */
- if (!reclaimed) {
- kbase_process_page_usage_dec(kctx, nr_pages_to_free);
- new_page_count = kbase_atomic_sub_pages(nr_pages_to_free,
- &kctx->used_pages);
- kbase_atomic_sub_pages(nr_pages_to_free,
- &kctx->kbdev->memdev.used_pages);
- kbase_tlstream_aux_pagesalloc(
- (u32)kctx->id,
- (u64)new_page_count);
- }
- return 0;
- }
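- /* kref release handler: tear down a physical allocation according to its type (native, alias, raw, imported, trace buffer). */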
- void kbase_mem_kref_free(struct kref *kref)
- {
- struct kbase_mem_phy_alloc *alloc;
- alloc = container_of(kref, struct kbase_mem_phy_alloc, kref);
- switch (alloc->type) {
- case KBASE_MEM_TYPE_NATIVE: {
- WARN_ON(!alloc->imported.kctx);
- /*
- * The physical allocation must have been removed from the
- * eviction list before trying to free it.
- */
- WARN_ON(!list_empty(&alloc->evict_node));
- kbase_free_phy_pages_helper(alloc, alloc->nents);
- break;
- }
- case KBASE_MEM_TYPE_ALIAS: {
- /* just call put on the underlying phy allocs */
- size_t i;
- struct kbase_aliased *aliased;
- aliased = alloc->imported.alias.aliased;
- if (aliased) {
- for (i = 0; i < alloc->imported.alias.nents; i++)
- if (aliased[i].alloc)
- kbase_mem_phy_alloc_put(aliased[i].alloc);
- vfree(aliased);
- }
- break;
- }
- case KBASE_MEM_TYPE_RAW:
- /* raw pages, external cleanup */
- break;
- #ifdef CONFIG_UMP
- case KBASE_MEM_TYPE_IMPORTED_UMP:
- ump_dd_release(alloc->imported.ump_handle);
- break;
- #endif
- #ifdef CONFIG_DMA_SHARED_BUFFER
- case KBASE_MEM_TYPE_IMPORTED_UMM:
- dma_buf_detach(alloc->imported.umm.dma_buf,
- alloc->imported.umm.dma_attachment);
- dma_buf_put(alloc->imported.umm.dma_buf);
- break;
- #endif
- case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
- if (alloc->imported.user_buf.mm)
- mmdrop(alloc->imported.user_buf.mm);
- kfree(alloc->imported.user_buf.pages);
- break;
- case KBASE_MEM_TYPE_TB:{
- void *tb;
- tb = alloc->imported.kctx->jctx.tb;
- kbase_device_trace_buffer_uninstall(alloc->imported.kctx);
- vfree(tb);
- break;
- }
- default:
- WARN(1, "Unexecpted free of type %d\n", alloc->type);
- break;
- }
- /* Free based on allocation type */
- if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
- vfree(alloc);
- else
- kfree(alloc);
- }
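- /* Allocate the initial physical backing (size out of vsize pages) for a region's CPU and GPU allocations, rolling back on partial failure. */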
- int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
- {
- KBASE_DEBUG_ASSERT(reg != NULL);
- KBASE_DEBUG_ASSERT(vsize > 0);
- /* validate user provided arguments */
- if (size > vsize || vsize > reg->nr_pages)
- goto out_term;
- /* Prevent vsize*sizeof from wrapping around.
- * For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail.
- */
- if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
- goto out_term;
- KBASE_DEBUG_ASSERT(vsize != 0);
- if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
- goto out_term;
- reg->cpu_alloc->reg = reg;
- if (reg->cpu_alloc != reg->gpu_alloc) {
- if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, size) != 0)
- goto out_rollback;
- reg->gpu_alloc->reg = reg;
- }
- return 0;
- out_rollback:
- kbase_free_phy_pages_helper(reg->cpu_alloc, size);
- out_term:
- return -1;
- }
- bool kbase_check_alloc_flags(unsigned long flags)
- {
- /* Only known input flags should be set. */
- if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
- return false;
- /* At least one flag should be set */
- if (flags == 0)
- return false;
- /* Either the GPU or CPU must be reading from the allocated memory */
- if ((flags & (BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD)) == 0)
- return false;
- /* Either the GPU or CPU must be writing to the allocated memory */
- if ((flags & (BASE_MEM_PROT_CPU_WR | BASE_MEM_PROT_GPU_WR)) == 0)
- return false;
- /* GPU cannot be writing to GPU executable memory and cannot grow the memory on page fault. */
- if ((flags & BASE_MEM_PROT_GPU_EX) && (flags & (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF)))
- return false;
- /* GPU should have at least read or write access otherwise there is no
- reason for allocating. */
- if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
- return false;
- /* BASE_MEM_IMPORT_SHARED is only valid for imported memory */
- if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
- return false;
- return true;
- }
- bool kbase_check_import_flags(unsigned long flags)
- {
- /* Only known input flags should be set. */
- if (flags & ~BASE_MEM_FLAGS_INPUT_MASK)
- return false;
- /* At least one flag should be set */
- if (flags == 0)
- return false;
- /* Imported memory cannot be GPU executable */
- if (flags & BASE_MEM_PROT_GPU_EX)
- return false;
- /* Imported memory cannot grow on page fault */
- if (flags & BASE_MEM_GROW_ON_GPF)
- return false;
- /* GPU should have at least read or write access otherwise there is no
- reason for importing. */
- if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
- return false;
- /* Secure memory cannot be read by the CPU */
- if ((flags & BASE_MEM_SECURE) && (flags & BASE_MEM_PROT_CPU_RD))
- return false;
- return true;
- }
- /**
- * @brief Acquire the per-context region list lock
- */
- void kbase_gpu_vm_lock(struct kbase_context *kctx)
- {
- KBASE_DEBUG_ASSERT(kctx != NULL);
- mutex_lock(&kctx->reg_lock);
- }
- /**
- * @brief Release the per-context region list lock
- */
- void kbase_gpu_vm_unlock(struct kbase_context *kctx)
- {
- KBASE_DEBUG_ASSERT(kctx != NULL);
- mutex_unlock(&kctx->reg_lock);
- }
- /**
- * kbase_jit_destroy_worker - Deferred worker which frees JIT allocations
- * @work: Work item
- *
- * This function does the work of freeing JIT allocations whose physical
- * backing has been released.
- */
- static void kbase_jit_destroy_worker(struct work_struct *work)
- {
- struct kbase_context *kctx;
- struct kbase_va_region *reg;
- kctx = container_of(work, struct kbase_context, jit_work);
- do {
- mutex_lock(&kctx->jit_lock);
- if (list_empty(&kctx->jit_destroy_head))
- reg = NULL;
- else
- reg = list_first_entry(&kctx->jit_destroy_head,
- struct kbase_va_region, jit_node);
- if (reg) {
- list_del(&reg->jit_node);
- mutex_unlock(&kctx->jit_lock);
- kbase_gpu_vm_lock(kctx);
- kbase_mem_free_region(kctx, reg);
- kbase_gpu_vm_unlock(kctx);
- } else
- mutex_unlock(&kctx->jit_lock);
- } while (reg);
- }
- int kbase_jit_init(struct kbase_context *kctx)
- {
- INIT_LIST_HEAD(&kctx->jit_active_head);
- INIT_LIST_HEAD(&kctx->jit_pool_head);
- INIT_LIST_HEAD(&kctx->jit_destroy_head);
- mutex_init(&kctx->jit_lock);
- INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
- return 0;
- }
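- /* Allocate a JIT region: reuse the closest-fitting region from the JIT pool (growing its backing if needed), or fall back to a fresh allocation. */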
- struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
- struct base_jit_alloc_info *info)
- {
- struct kbase_va_region *reg = NULL;
- struct kbase_va_region *walker;
- struct kbase_va_region *temp;
- size_t current_diff = SIZE_MAX;
- int ret;
- mutex_lock(&kctx->jit_lock);
- /*
- * Scan the pool for an existing allocation which meets our
- * requirements and remove it.
- */
- list_for_each_entry_safe(walker, temp, &kctx->jit_pool_head, jit_node) {
- if (walker->nr_pages >= info->va_pages) {
- size_t min_size, max_size, diff;
- /*
- * The JIT allocation's VA requirements have been
- * met; it's suitable, but other allocations
- * might be a better fit.
- */
- min_size = min_t(size_t, walker->gpu_alloc->nents,
- info->commit_pages);
- max_size = max_t(size_t, walker->gpu_alloc->nents,
- info->commit_pages);
- diff = max_size - min_size;
- if (current_diff > diff) {
- current_diff = diff;
- reg = walker;
- }
- /* The allocation is an exact match, stop looking */
- if (current_diff == 0)
- break;
- }
- }
- if (reg) {
- /*
- * Remove the found region from the pool and add it to the
- * active list.
- */
- list_del_init(&reg->jit_node);
- list_add(&reg->jit_node, &kctx->jit_active_head);
- /* Release the jit lock before modifying the allocation */
- mutex_unlock(&kctx->jit_lock);
- kbase_gpu_vm_lock(kctx);
- /* Make the physical backing no longer reclaimable */
- if (!kbase_mem_evictable_unmake(reg->gpu_alloc))
- goto update_failed;
- /* Grow the backing if required */
- if (reg->gpu_alloc->nents < info->commit_pages) {
- size_t delta;
- size_t old_size = reg->gpu_alloc->nents;
- /* Allocate some more pages */
- delta = info->commit_pages - reg->gpu_alloc->nents;
- if (kbase_alloc_phy_pages_helper(reg->gpu_alloc, delta)
- != 0)
- goto update_failed;
- if (reg->cpu_alloc != reg->gpu_alloc) {
- if (kbase_alloc_phy_pages_helper(
- reg->cpu_alloc, delta) != 0) {
- kbase_free_phy_pages_helper(
- reg->gpu_alloc, delta);
- goto update_failed;
- }
- }
- ret = kbase_mem_grow_gpu_mapping(kctx, reg,
- info->commit_pages, old_size);
- /*
- * The grow failed so put the allocation back in the
- * pool and return failure.
- */
- if (ret)
- goto update_failed;
- }
- kbase_gpu_vm_unlock(kctx);
- } else {
- /* No suitable JIT allocation was found so create a new one */
- u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
- BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
- BASE_MEM_COHERENT_LOCAL;
- u64 gpu_addr;
- u16 alignment;
- mutex_unlock(&kctx->jit_lock);
- reg = kbase_mem_alloc(kctx, info->va_pages, info->commit_pages,
- info->extent, &flags, &gpu_addr, &alignment);
- if (!reg)
- goto out_unlocked;
- mutex_lock(&kctx->jit_lock);
- list_add(&reg->jit_node, &kctx->jit_active_head);
- mutex_unlock(&kctx->jit_lock);
- }
- return reg;
- update_failed:
- /*
- * An update to an allocation from the pool failed; chances
- * are slim a new allocation would fare any better, so return
- * the allocation to the pool and fail.
- */
- kbase_gpu_vm_unlock(kctx);
- mutex_lock(&kctx->jit_lock);
- list_del_init(&reg->jit_node);
- list_add(&reg->jit_node, &kctx->jit_pool_head);
- mutex_unlock(&kctx->jit_lock);
- out_unlocked:
- return NULL;
- }
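- /* Return a JIT region to the pool and make its physical backing reclaimable again. */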
- void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
- {
- /* The physical backing of memory in the pool is always reclaimable */
- down_read(&kctx->process_mm->mmap_sem);
- kbase_gpu_vm_lock(kctx);
- kbase_mem_evictable_make(reg->gpu_alloc);
- kbase_gpu_vm_unlock(kctx);
- up_read(&kctx->process_mm->mmap_sem);
- mutex_lock(&kctx->jit_lock);
- list_del_init(&reg->jit_node);
- list_add(&reg->jit_node, &kctx->jit_pool_head);
- mutex_unlock(&kctx->jit_lock);
- }
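- /* Called when a JIT region's physical backing has been reclaimed: move it to the destroy list and kick the worker to free it. */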
- void kbase_jit_backing_lost(struct kbase_va_region *reg)
- {
- struct kbase_context *kctx = reg->kctx;
- /*
- * JIT allocations will always be on a list, if the region
- * is not on a list then it's not a JIT allocation.
- */
- if (list_empty(&reg->jit_node))
- return;
- /*
- * Freeing the allocation requires locks we might not be able
- * to take now, so move the allocation to the free list and kick
- * the worker which will do the freeing.
- */
- mutex_lock(&kctx->jit_lock);
- list_del_init(&reg->jit_node);
- list_add(&reg->jit_node, &kctx->jit_destroy_head);
- mutex_unlock(&kctx->jit_lock);
- schedule_work(&kctx->jit_work);
- }
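- /* Evict (free) the oldest region in the JIT pool; returns true if a region was freed. */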
- bool kbase_jit_evict(struct kbase_context *kctx)
- {
- struct kbase_va_region *reg = NULL;
- lockdep_assert_held(&kctx->reg_lock);
- /* Free the oldest allocation from the pool */
- mutex_lock(&kctx->jit_lock);
- if (!list_empty(&kctx->jit_pool_head)) {
- reg = list_entry(kctx->jit_pool_head.prev,
- struct kbase_va_region, jit_node);
- list_del(&reg->jit_node);
- }
- mutex_unlock(&kctx->jit_lock);
- if (reg)
- kbase_mem_free_region(kctx, reg);
- return (reg != NULL);
- }
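- /*
- * kbase_jit_evict() frees the oldest (tail) entry of jit_pool_head and
- * reports whether anything was freed; kctx->reg_lock must be held. A
- * caller that needs to release memory could loop on it, e.g. (with a
- * hypothetical need_more_pages() predicate):
- *
- *   while (need_more_pages(kctx))
- *           if (!kbase_jit_evict(kctx))
- *                   break;
- */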
- void kbase_jit_term(struct kbase_context *kctx)
- {
- struct kbase_va_region *walker;
- /* Free all allocations for this context */
- /*
- * Flush the freeing of allocations whose backing has been freed
- * (i.e. everything in jit_destroy_head).
- */
- cancel_work_sync(&kctx->jit_work);
- kbase_gpu_vm_lock(kctx);
- /* Free all allocations from the pool */
- while (!list_empty(&kctx->jit_pool_head)) {
- walker = list_first_entry(&kctx->jit_pool_head,
- struct kbase_va_region, jit_node);
- list_del(&walker->jit_node);
- kbase_mem_free_region(kctx, walker);
- }
- /* Free all allocations from active list */
- while (!list_empty(&kctx->jit_active_head)) {
- walker = list_first_entry(&kctx->jit_active_head,
- struct kbase_va_region, jit_node);
- list_del(&walker->jit_node);
- kbase_mem_free_region(kctx, walker);
- }
- kbase_gpu_vm_unlock(kctx);
- }
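- /*
- * Teardown ordering above: cancel_work_sync() guarantees the destroy
- * worker is no longer running before the pool and active lists are
- * emptied under the GPU VM lock.
- */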
- static int kbase_jd_user_buf_map(struct kbase_context *kctx,
- struct kbase_va_region *reg)
- {
- long pinned_pages;
- struct kbase_mem_phy_alloc *alloc;
- struct page **pages;
- phys_addr_t *pa;
- long i;
- int err = -ENOMEM;
- unsigned long address;
- struct mm_struct *mm;
- struct device *dev;
- unsigned long offset;
- unsigned long local_size;
- alloc = reg->gpu_alloc;
- pa = kbase_get_gpu_phy_pages(reg);
- address = alloc->imported.user_buf.address;
- mm = alloc->imported.user_buf.mm;
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
- pages = alloc->imported.user_buf.pages;
- pinned_pages = get_user_pages(
- address,
- alloc->imported.user_buf.nr_pages,
- (reg->flags & KBASE_REG_GPU_WR) ? FOLL_WRITE : 0,
- pages, NULL);
- if (pinned_pages <= 0)
- return pinned_pages;
- if (pinned_pages != alloc->imported.user_buf.nr_pages) {
- for (i = 0; i < pinned_pages; i++)
- put_page(pages[i]);
- return -ENOMEM;
- }
- dev = kctx->kbdev->dev;
- offset = address & ~PAGE_MASK;
- local_size = alloc->imported.user_buf.size;
- for (i = 0; i < pinned_pages; i++) {
- dma_addr_t dma_addr;
- unsigned long min;
- min = MIN(PAGE_SIZE - offset, local_size);
- dma_addr = dma_map_page(dev, pages[i],
- offset, min,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(dev, dma_addr))
- goto unwind;
- alloc->imported.user_buf.dma_addrs[i] = dma_addr;
- pa[i] = page_to_phys(pages[i]);
- local_size -= min;
- offset = 0;
- }
- alloc->nents = pinned_pages;
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn, pa,
- kbase_reg_current_backed_size(reg),
- reg->flags);
- if (err == 0)
- return 0;
- alloc->nents = 0;
- /* fall through to unwind */
- unwind:
- while (i--) {
- dma_unmap_page(kctx->kbdev->dev,
- alloc->imported.user_buf.dma_addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
- put_page(pages[i]);
- pages[i] = NULL;
- }
- return err;
- }
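- /*
- * The import path above pins the user pages with get_user_pages()
- * (requesting write access only when the region is GPU-writable),
- * DMA-maps each page, records both the DMA and physical addresses, and
- * finally inserts the GPU MMU mappings. A short pin (fewer pages than
- * requested) is treated as failure, and any error after pinning unwinds
- * the pages that have already been DMA-mapped and pinned.
- */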
- static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
- struct kbase_mem_phy_alloc *alloc, bool writeable)
- {
- long i;
- struct page **pages;
- unsigned long size = alloc->imported.user_buf.size;
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
- pages = alloc->imported.user_buf.pages;
- for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
- unsigned long local_size;
- dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
- local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
- dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
- DMA_BIDIRECTIONAL);
- if (writeable)
- set_page_dirty_lock(pages[i]);
- put_page(pages[i]);
- pages[i] = NULL;
- size -= local_size;
- }
- alloc->nents = 0;
- }
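- /*
- * kbase_jd_user_buf_unmap() reverses the mapping above: each page is
- * DMA-unmapped, marked dirty only if the buffer could have been written
- * by the GPU, and then released with put_page().
- */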
- #ifdef CONFIG_DMA_SHARED_BUFFER
- extern int kbase_jd_umm_map(struct kbase_context *kctx,
- struct kbase_va_region *reg)
- {
- struct sg_table *sgt;
- struct scatterlist *s;
- int i;
- phys_addr_t *pa;
- int err;
- size_t count = 0;
- struct kbase_mem_phy_alloc *alloc;
- alloc = reg->gpu_alloc;
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt == NULL);
- sgt = dma_buf_map_attachment(alloc->imported.umm.dma_attachment,
- DMA_BIDIRECTIONAL);
- if (IS_ERR_OR_NULL(sgt))
- return -EINVAL;
- /* save for later */
- alloc->imported.umm.sgt = sgt;
- pa = kbase_get_gpu_phy_pages(reg);
- KBASE_DEBUG_ASSERT(pa);
- for_each_sg(sgt->sgl, s, sgt->nents, i) {
- int j;
- size_t pages = PFN_UP(sg_dma_len(s));
- WARN_ONCE(sg_dma_len(s) & (PAGE_SIZE-1),
- "sg_dma_len(s)=%u is not a multiple of PAGE_SIZE\n",
- sg_dma_len(s));
- WARN_ONCE(sg_dma_address(s) & (PAGE_SIZE-1),
- "sg_dma_address(s)=%llx is not aligned to PAGE_SIZE\n",
- (unsigned long long) sg_dma_address(s));
- for (j = 0; (j < pages) && (count < reg->nr_pages); j++,
- count++)
- *pa++ = sg_dma_address(s) + (j << PAGE_SHIFT);
- WARN_ONCE(j < pages,
- "sg list from dma_buf_map_attachment > dma_buf->size=%zu\n",
- alloc->imported.umm.dma_buf->size);
- }
- if (WARN_ONCE(count < reg->nr_pages,
- "sg list from dma_buf_map_attachment < dma_buf->size=%zu\n",
- alloc->imported.umm.dma_buf->size)) {
- err = -EINVAL;
- goto out;
- }
- /* Update nents as we now have pages to map */
- alloc->nents = count;
- err = kbase_mmu_insert_pages(kctx, reg->start_pfn,
- kbase_get_gpu_phy_pages(reg),
- kbase_reg_current_backed_size(reg),
- reg->flags | KBASE_REG_GPU_WR | KBASE_REG_GPU_RD);
- out:
- if (err) {
- dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
- alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- alloc->imported.umm.sgt = NULL;
- }
- return err;
- }
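- /*
- * For dma-buf imports the sg_table returned by dma_buf_map_attachment()
- * is flattened page by page into the region's GPU physical page array;
- * the WARN_ONCE checks only flag unexpected (unaligned or wrongly sized)
- * scatterlists. The GPU mapping is then inserted with KBASE_REG_GPU_RD
- * and KBASE_REG_GPU_WR forced on in addition to the region flags.
- */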
- extern void kbase_jd_umm_unmap(struct kbase_context *kctx,
- struct kbase_mem_phy_alloc *alloc)
- {
- KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(alloc);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.dma_attachment);
- KBASE_DEBUG_ASSERT(alloc->imported.umm.sgt);
- dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
- alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
- alloc->imported.umm.sgt = NULL;
- alloc->nents = 0;
- }
- #endif /* CONFIG_DMA_SHARED_BUFFER */
- #if (defined(CONFIG_KDS) && defined(CONFIG_UMP)) \
- || defined(CONFIG_DMA_SHARED_BUFFER_USES_KDS)
- static void add_kds_resource(struct kds_resource *kds_res,
- struct kds_resource **kds_resources, u32 *kds_res_count,
- unsigned long *kds_access_bitmap, bool exclusive)
- {
- u32 i;
- for (i = 0; i < *kds_res_count; i++) {
- /* Duplicate resource, ignore */
- if (kds_resources[i] == kds_res)
- return;
- }
- kds_resources[*kds_res_count] = kds_res;
- if (exclusive)
- set_bit(*kds_res_count, kds_access_bitmap);
- (*kds_res_count)++;
- }
- #endif
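- /*
- * add_kds_resource() gathers the KDS resources a job depends on:
- * duplicates are skipped, and exclusive access is recorded by setting
- * the matching bit in kds_access_bitmap.
- */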
- struct kbase_mem_phy_alloc *kbase_map_external_resource(
- struct kbase_context *kctx, struct kbase_va_region *reg,
- struct mm_struct *locked_mm
- #ifdef CONFIG_KDS
- , u32 *kds_res_count, struct kds_resource **kds_resources,
- unsigned long *kds_access_bitmap, bool exclusive
- #endif
- )
- {
- int err;
- /* decide what needs to happen for this resource */
- switch (reg->gpu_alloc->type) {
- case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- if (reg->gpu_alloc->imported.user_buf.mm != locked_mm)
- goto exit;
- reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
- if (reg->gpu_alloc->imported.user_buf.current_mapping_usage_count == 1) {
- err = kbase_jd_user_buf_map(kctx, reg);
- if (err) {
- reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
- goto exit;
- }
- }
- }
- break;
- case KBASE_MEM_TYPE_IMPORTED_UMP: {
- #if defined(CONFIG_KDS) && defined(CONFIG_UMP)
- if (kds_res_count) {
- struct kds_resource *kds_res;
- kds_res = ump_dd_kds_resource_get(
- reg->gpu_alloc->imported.ump_handle);
- if (kds_res)
- add_kds_resource(kds_res, kds_resources,
- kds_res_count,
- kds_access_bitmap, exclusive);
- }
- #endif /*defined(CONFIG_KDS) && defined(CONFIG_UMP) */
- break;
- }
- #ifdef CONFIG_DMA_SHARED_BUFFER
- case KBASE_MEM_TYPE_IMPORTED_UMM: {
- #ifdef CONFIG_DMA_SHARED_BUFFER_USES_KDS
- if (kds_res_count) {
- struct kds_resource *kds_res;
- kds_res = get_dma_buf_kds_resource(
- reg->gpu_alloc->imported.umm.dma_buf);
- if (kds_res)
- add_kds_resource(kds_res, kds_resources,
- kds_res_count,
- kds_access_bitmap, exclusive);
- }
- #endif
- reg->gpu_alloc->imported.umm.current_mapping_usage_count++;
- if (reg->gpu_alloc->imported.umm.current_mapping_usage_count == 1) {
- err = kbase_jd_umm_map(kctx, reg);
- if (err) {
- reg->gpu_alloc->imported.umm.current_mapping_usage_count--;
- goto exit;
- }
- }
- break;
- }
- #endif
- default:
- goto exit;
- }
- return kbase_mem_phy_alloc_get(reg->gpu_alloc);
- exit:
- return NULL;
- }
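- /*
- * On success kbase_map_external_resource() returns reg->gpu_alloc with an
- * extra reference taken via kbase_mem_phy_alloc_get(); the underlying
- * mapping is only created for the first user, i.e. when the relevant
- * current_mapping_usage_count goes from 0 to 1. Callers are expected to
- * balance it with kbase_unmap_external_resource() below.
- */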
- void kbase_unmap_external_resource(struct kbase_context *kctx,
- struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc)
- {
- switch (alloc->type) {
- #ifdef CONFIG_DMA_SHARED_BUFFER
- case KBASE_MEM_TYPE_IMPORTED_UMM: {
- alloc->imported.umm.current_mapping_usage_count--;
- if (alloc->imported.umm.current_mapping_usage_count == 0) {
- if (reg && reg->gpu_alloc == alloc)
- kbase_mmu_teardown_pages(
- kctx,
- reg->start_pfn,
- kbase_reg_current_backed_size(reg));
- kbase_jd_umm_unmap(kctx, alloc);
- }
- }
- break;
- #endif /* CONFIG_DMA_SHARED_BUFFER */
- case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- alloc->imported.user_buf.current_mapping_usage_count--;
- if (alloc->imported.user_buf.current_mapping_usage_count == 0) {
- bool writeable = true;
- if (reg && reg->gpu_alloc == alloc)
- kbase_mmu_teardown_pages(
- kctx,
- reg->start_pfn,
- kbase_reg_current_backed_size(reg));
- if (reg && ((reg->flags & KBASE_REG_GPU_WR) == 0))
- writeable = false;
- kbase_jd_user_buf_unmap(kctx, alloc, writeable);
- }
- }
- break;
- default:
- break;
- }
- kbase_mem_phy_alloc_put(alloc);
- }
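- /*
- * The unmap path above tears down the GPU MMU mapping only when the
- * usage count drops back to zero and only if the region still refers to
- * this allocation; for user buffers the pages are marked dirty unless
- * the region is known to be read-only for the GPU.
- */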
- struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
- struct kbase_context *kctx, u64 gpu_addr)
- {
- struct kbase_ctx_ext_res_meta *meta = NULL;
- struct kbase_ctx_ext_res_meta *walker;
- lockdep_assert_held(&kctx->reg_lock);
- /*
- * Walk the per context external resource metadata list for the
- * metadata which matches the region which is being acquired.
- */
- list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
- if (walker->gpu_addr == gpu_addr) {
- meta = walker;
- break;
- }
- }
- /* No metadata exists so create one. */
- if (!meta) {
- struct kbase_va_region *reg;
- /* Find the region */
- reg = kbase_region_tracker_find_region_enclosing_address(
- kctx, gpu_addr);
- if (NULL == reg || (reg->flags & KBASE_REG_FREE))
- goto failed;
- /* Allocate the metadata object */
- meta = kzalloc(sizeof(*meta), GFP_KERNEL);
- if (!meta)
- goto failed;
- /*
- * Fill in the metadata object and acquire a reference
- * for the physical resource.
- */
- meta->alloc = kbase_map_external_resource(kctx, reg, NULL
- #ifdef CONFIG_KDS
- , NULL, NULL,
- NULL, false
- #endif
- );
- if (!meta->alloc)
- goto fail_map;
- meta->gpu_addr = reg->start_pfn << PAGE_SHIFT;
- list_add(&meta->ext_res_node, &kctx->ext_res_meta_head);
- }
- return meta;
- fail_map:
- kfree(meta);
- failed:
- return NULL;
- }
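- /*
- * A sticky resource keeps an external resource's mapping referenced until
- * it is explicitly released, or until context termination when
- * kbase_sticky_resource_term() cleans up whatever is left. A usage sketch,
- * with kctx->reg_lock held as the lockdep assertions require:
- *
- *   struct kbase_ctx_ext_res_meta *meta;
- *   meta = kbase_sticky_resource_acquire(kctx, gpu_addr);
- *   ...
- *   kbase_sticky_resource_release(kctx, meta, gpu_addr);
- */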
- bool kbase_sticky_resource_release(struct kbase_context *kctx,
- struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
- {
- struct kbase_ctx_ext_res_meta *walker;
- struct kbase_va_region *reg;
- lockdep_assert_held(&kctx->reg_lock);
- /* Search for the metadata if one isn't provided. */
- if (!meta) {
- /*
- * Walk the per context external resource metadata list for the
- * metadata which matches the region which is being released.
- */
- list_for_each_entry(walker, &kctx->ext_res_meta_head,
- ext_res_node) {
- if (walker->gpu_addr == gpu_addr) {
- meta = walker;
- break;
- }
- }
- }
- /* No metadata so just return. */
- if (!meta)
- return false;
- /* Drop the physical memory reference and free the metadata. */
- reg = kbase_region_tracker_find_region_enclosing_address(
- kctx,
- meta->gpu_addr);
- kbase_unmap_external_resource(kctx, reg, meta->alloc);
- list_del(&meta->ext_res_node);
- kfree(meta);
- return true;
- }
- int kbase_sticky_resource_init(struct kbase_context *kctx)
- {
- INIT_LIST_HEAD(&kctx->ext_res_meta_head);
- return 0;
- }
- void kbase_sticky_resource_term(struct kbase_context *kctx)
- {
- struct kbase_ctx_ext_res_meta *walker;
- lockdep_assert_held(&kctx->reg_lock);
- /*
- * Free any sticky resources which haven't been unmapped.
- *
- * Note:
- * We don't care about refcounts at this point as no future
- * references to the metadata will be made.
- * Region termination would find these if we didn't free them
- * here, but it's more efficient to clean them up here.
- */
- while (!list_empty(&kctx->ext_res_meta_head)) {
- walker = list_first_entry(&kctx->ext_res_meta_head,
- struct kbase_ctx_ext_res_meta, ext_res_node);
- kbase_sticky_resource_release(kctx, walker, 0);
- }
- }