#ifndef _KBASE_MEM_H_
#define _KBASE_MEM_H_

#ifndef _KBASE_H_
#error "Don't include this file directly, use mali_kbase.h instead"
#endif

#include <linux/kref.h>
#ifdef CONFIG_KDS
#include <linux/kds.h>
#endif
#ifdef CONFIG_UMP
#include <linux/ump.h>
#endif
#include "mali_base_kernel.h"
#include <mali_kbase_hw.h>
#include "mali_kbase_pm.h"
#include "mali_kbase_defs.h"
#include "mali_kbase_mem_linux.h"
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2)	/* round to 4 pages */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3)	/* round to 8 pages */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0)			/* round to 1 page */

#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)
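
/*
 * kbase_cpu_mapping - one CPU (userspace) mapping of an allocation.
 * Each mapping records the backing kbase_mem_phy_alloc, the owning
 * context and region, the page offset into the allocation, a reference
 * count, and the CPU virtual address range it covers.
 */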
struct kbase_cpu_mapping {
	struct list_head mappings_list;
	struct kbase_mem_phy_alloc *alloc;
	struct kbase_context *kctx;
	struct kbase_va_region *region;
	pgoff_t page_off;
	int count;
	unsigned long vm_start;
	unsigned long vm_end;
};
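
/*
 * kbase_memory_type - backing type of a physical allocation: natively
 * allocated pages, memory imported from UMP, from a dma-buf (UMM) or
 * from a user buffer, an alias of other allocations, a trace buffer,
 * or raw pages.
 */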
enum kbase_memory_type {
	KBASE_MEM_TYPE_NATIVE,
	KBASE_MEM_TYPE_IMPORTED_UMP,
	KBASE_MEM_TYPE_IMPORTED_UMM,
	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
	KBASE_MEM_TYPE_ALIAS,
	KBASE_MEM_TYPE_TB,
	KBASE_MEM_TYPE_RAW
};

struct kbase_aliased {
	struct kbase_mem_phy_alloc *alloc;
	u64 offset;
	u64 length;
};

#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1ul << 0)
#define KBASE_MEM_PHY_ALLOC_LARGE (1ul << 1)
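
/*
 * kbase_mem_phy_alloc - a reference-counted set of physical pages.
 *
 * The object is shared between the CPU and GPU side of a region (see
 * kbase_va_region below) via kref.  'nents' is the number of pages
 * currently backed and 'pages' the array of their physical addresses;
 * 'mappings' lists the live CPU mappings.  Allocations on an eviction
 * list use 'evict_node'/'evicted', and 'imported' carries per-type
 * state for UMP, dma-buf (umm), alias and user-buffer backed memory.
 */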
struct kbase_mem_phy_alloc {
	struct kref kref;
	atomic_t gpu_mappings;
	size_t nents;
	phys_addr_t *pages;

	struct list_head mappings;

	struct list_head evict_node;

	size_t evicted;

	struct kbase_va_region *reg;

	enum kbase_memory_type type;
	unsigned long properties;
	struct list_head zone_cache;

	union {
#ifdef CONFIG_UMP
		ump_dd_handle ump_handle;
#endif
#ifdef CONFIG_DMA_SHARED_BUFFER
		struct {
			struct dma_buf *dma_buf;
			struct dma_buf_attachment *dma_attachment;
			unsigned int current_mapping_usage_count;
			struct sg_table *sgt;
		} umm;
#endif /* CONFIG_DMA_SHARED_BUFFER */
		struct {
			u64 stride;
			size_t nents;
			struct kbase_aliased *aliased;
		} alias;

		struct kbase_context *kctx;
		struct {
			unsigned long address;
			unsigned long size;
			unsigned long nr_pages;
			struct page **pages;
			unsigned int current_mapping_usage_count;
			struct mm_struct *mm;
			dma_addr_t *dma_addrs;
		} user_buf;
	} imported;
};
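
/*
 * The gpu_mappings counter tracks how many times a NATIVE allocation is
 * currently mapped on the GPU.  Maps and unmaps must stay balanced;
 * kbase_mem_phy_alloc_gpu_unmapped() reports a mismatch (the counter
 * going negative) and dumps the call stack.
 */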
static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);

	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		atomic_inc(&alloc->gpu_mappings);
}

static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);

	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		if (atomic_dec_return(&alloc->gpu_mappings) < 0) {
			pr_err("Mismatched %s:\n", __func__);
			dump_stack();
		}
}

void kbase_mem_kref_free(struct kref *kref);

int kbase_mem_init(struct kbase_device *kbdev);
void kbase_mem_halt(struct kbase_device *kbdev);
void kbase_mem_term(struct kbase_device *kbdev);
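
/*
 * Reference counting helpers.  kbase_mem_phy_alloc_get() takes a
 * reference and returns the allocation for convenient assignment;
 * kbase_mem_phy_alloc_put() drops a reference (freeing through
 * kbase_mem_kref_free() on the last put) and returns NULL so callers
 * can clear their pointer in the same statement.
 *
 * Typical usage (sketch, based on kbase_reg_prepare_native() below):
 *
 *	reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
 *	...
 *	reg->gpu_alloc = kbase_mem_phy_alloc_put(reg->gpu_alloc);
 */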
static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
{
	kref_get(&alloc->kref);
	return alloc;
}

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
{
	kref_put(&alloc->kref, kbase_mem_kref_free);
	return NULL;
}
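
/*
 * kbase_va_region - a GPU virtual address range within a context.
 *
 * Regions live in the context's region tracker (rblink) and span
 * nr_pages pages starting at start_pfn.  The flags below encode access
 * permissions, cacheability, the VA zone (SAME_VA, EXEC or CUSTOM_VA),
 * memory attributes and growth behaviour.  cpu_alloc and gpu_alloc point
 * at the backing physical allocations; they usually reference the same
 * kbase_mem_phy_alloc (their nents are asserted equal by the accessors
 * further down).
 */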
struct kbase_va_region {
	struct rb_node rblink;
	struct list_head link;
	struct kbase_context *kctx;
	u64 start_pfn;
	size_t nr_pages;

#define KBASE_REG_FREE              (1ul << 0)
#define KBASE_REG_CPU_WR            (1ul << 1)
#define KBASE_REG_GPU_WR            (1ul << 2)
#define KBASE_REG_GPU_NX            (1ul << 3)
#define KBASE_REG_CPU_CACHED        (1ul << 4)
#define KBASE_REG_GPU_CACHED        (1ul << 5)
#define KBASE_REG_GROWABLE          (1ul << 6)
#define KBASE_REG_PF_GROW           (1ul << 7)
#define KBASE_REG_CUSTOM_VA         (1ul << 8)
#define KBASE_REG_SHARE_IN          (1ul << 9)
#define KBASE_REG_SHARE_BOTH        (1ul << 10)
#define KBASE_REG_ZONE_MASK         (3ul << 11)
#define KBASE_REG_ZONE(x)           (((x) & 3) << 11)
#define KBASE_REG_GPU_RD            (1ul << 13)
#define KBASE_REG_CPU_RD            (1ul << 14)
#define KBASE_REG_ALIGNED           (1ul << 15)
#define KBASE_REG_MEMATTR_MASK      (7ul << 16)
#define KBASE_REG_MEMATTR_INDEX(x)  (((x) & 7) << 16)
#define KBASE_REG_MEMATTR_VALUE(x)  (((x) & KBASE_REG_MEMATTR_MASK) >> 16)
#define KBASE_REG_SECURE            (1ul << 19)
#define KBASE_REG_DONT_NEED         (1ul << 20)

#define KBASE_REG_ZONE_SAME_VA      KBASE_REG_ZONE(0)

#define KBASE_REG_ZONE_EXEC         KBASE_REG_ZONE(1)
#define KBASE_REG_ZONE_EXEC_BASE    (0x101000000ULL >> PAGE_SHIFT)
#define KBASE_REG_ZONE_EXEC_SIZE    ((16ULL * 1024 * 1024) >> PAGE_SHIFT)

#define KBASE_REG_ZONE_CUSTOM_VA    KBASE_REG_ZONE(2)
#define KBASE_REG_ZONE_CUSTOM_VA_BASE (KBASE_REG_ZONE_EXEC_BASE + KBASE_REG_ZONE_EXEC_SIZE) /* Starting after KBASE_REG_ZONE_EXEC */
#define KBASE_REG_ZONE_CUSTOM_VA_SIZE (((1ULL << 44) >> PAGE_SHIFT) - KBASE_REG_ZONE_CUSTOM_VA_BASE)

	unsigned long flags;
	size_t extent;
	struct kbase_mem_phy_alloc *cpu_alloc;
	struct kbase_mem_phy_alloc *gpu_alloc;

	struct kds_resource *kds_res;

	struct list_head jit_node;
};

static inline phys_addr_t *kbase_get_cpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
	return reg->cpu_alloc->pages;
}

static inline phys_addr_t *kbase_get_gpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
	return reg->gpu_alloc->pages;
}

static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);

	if (!reg->cpu_alloc)
		return 0;
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);
	return reg->cpu_alloc->nents;
}

#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD ((size_t)(4*1024)) /* size above which vmalloc is used over kmalloc */
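
/*
 * kbase_alloc_create() builds a kbase_mem_phy_alloc sized for nr_pages:
 * the struct, the pages[] array and (for USER_BUF imports) a parallel
 * dma_addrs[] array are carved out of one allocation, laid out back to
 * back.  The nr_pages check guards the size computation against
 * overflowing size_t, and allocations larger than
 * KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD fall back to vzalloc() and are
 * tagged KBASE_MEM_PHY_ALLOC_LARGE.  Failures return an ERR_PTR, so
 * callers must test the result with IS_ERR() rather than comparing
 * against NULL.
 */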
static inline struct kbase_mem_phy_alloc *kbase_alloc_create(size_t nr_pages, enum kbase_memory_type type)
{
	struct kbase_mem_phy_alloc *alloc;
	size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
	size_t per_page_size = sizeof(*alloc->pages);

	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
		alloc_size += nr_pages *
				sizeof(*alloc->imported.user_buf.dma_addrs);
		per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
	}

	if (nr_pages > ((((size_t) -1) - sizeof(*alloc))
			/ per_page_size))
		return ERR_PTR(-ENOMEM);

	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc = vzalloc(alloc_size);
	else
		alloc = kzalloc(alloc_size, GFP_KERNEL);

	if (!alloc)
		return ERR_PTR(-ENOMEM);

	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;

	kref_init(&alloc->kref);
	atomic_set(&alloc->gpu_mappings, 0);
	alloc->nents = 0;
	alloc->pages = (void *)(alloc + 1);
	INIT_LIST_HEAD(&alloc->mappings);
	alloc->type = type;
	INIT_LIST_HEAD(&alloc->zone_cache);

	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
		alloc->imported.user_buf.dma_addrs =
				(void *) (alloc->pages + nr_pages);

	return alloc;
}
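
/*
 * kbase_reg_prepare_native() attaches native backing to a free region:
 * it creates the CPU-side allocation and then either creates a separate
 * GPU-side allocation (when the context's "infinite cache" mode is
 * active and the region is CPU cached) or takes a second reference on
 * the CPU allocation so both sides share the same pages.  On success
 * the KBASE_REG_FREE flag is cleared.
 */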
static inline int kbase_reg_prepare_native(struct kbase_va_region *reg,
		struct kbase_context *kctx)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);

	reg->cpu_alloc = kbase_alloc_create(reg->nr_pages,
			KBASE_MEM_TYPE_NATIVE);
	if (IS_ERR(reg->cpu_alloc))
		return PTR_ERR(reg->cpu_alloc);
	else if (!reg->cpu_alloc)
		return -ENOMEM;

	reg->cpu_alloc->imported.kctx = kctx;
	INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);

	if (kctx->infinite_cache_active && (reg->flags & KBASE_REG_CPU_CACHED)) {
		reg->gpu_alloc = kbase_alloc_create(reg->nr_pages,
				KBASE_MEM_TYPE_NATIVE);
		reg->gpu_alloc->imported.kctx = kctx;
		INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
	} else {
		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
	}

	INIT_LIST_HEAD(&reg->jit_node);
	reg->flags &= ~KBASE_REG_FREE;
	return 0;
}

static inline int kbase_atomic_add_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_add_return(num_pages, used_pages);
	return new_val;
}

static inline int kbase_atomic_sub_pages(int num_pages, atomic_t *used_pages)
{
	int new_val = atomic_sub_return(num_pages, used_pages);
	return new_val;
}
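
/*
 * Memory pool interface.  A kbase_mem_pool caches free pages up to
 * max_size; KBASE_MEM_POOL_MAX_SIZE_KBDEV/KCTX below cap the device and
 * per-context pools at 64 MiB worth of pages.  The next_pool argument to
 * kbase_mem_pool_init() chains pools together (inferred: a context pool
 * falling back to the device pool when it runs empty or overflows).
 * Single pages are allocated and freed with kbase_mem_pool_alloc() and
 * kbase_mem_pool_free(); the *_alloc_pages()/*_free_pages() variants
 * operate on arrays of physical addresses.
 */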
#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)
#define KBASE_MEM_POOL_MAX_SIZE_KCTX  (SZ_64M >> PAGE_SHIFT)

int kbase_mem_pool_init(struct kbase_mem_pool *pool,
		size_t max_size,
		struct kbase_device *kbdev,
		struct kbase_mem_pool *next_pool);
void kbase_mem_pool_term(struct kbase_mem_pool *pool);
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page,
		bool dirty);
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages);
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
		phys_addr_t *pages, bool dirty, bool reclaimed);

static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
{
	return READ_ONCE(pool->cur_size);
}

static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
{
	return pool->max_size;
}

void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);
size_t kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);
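
/*
 * Region tracker: per-context bookkeeping of GPU VA regions.  It is set
 * up and torn down with kbase_region_tracker_init()/term() (plus a JIT
 * variant that reserves jit_va_pages), supports lookups by enclosing or
 * base GPU address, and pairs with kbase_alloc_free_region(),
 * kbase_free_alloced_region() and kbase_add_va_region() for creating
 * and placing regions in a given zone.
 */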
int kbase_region_tracker_init(struct kbase_context *kctx);
int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages);
void kbase_region_tracker_term(struct kbase_context *kctx);

struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(struct kbase_context *kctx, u64 gpu_addr);
struct kbase_va_region *kbase_region_tracker_find_region_base_address(struct kbase_context *kctx, u64 gpu_addr);

struct kbase_va_region *kbase_alloc_free_region(struct kbase_context *kctx, u64 start_pfn, size_t nr_pages, int zone);
void kbase_free_alloced_region(struct kbase_va_region *reg);
int kbase_add_va_region(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);

bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);
void kbase_update_region_flags(struct kbase_context *kctx,
		struct kbase_va_region *reg, unsigned long flags);

void kbase_gpu_vm_lock(struct kbase_context *kctx);
void kbase_gpu_vm_unlock(struct kbase_context *kctx);

int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);
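
/*
 * GPU MMU interface: per-context page-table lifetime (init/term and PGD
 * allocation), insertion, update and teardown of page mappings at a
 * given virtual PFN, mapping and unmapping of whole regions with
 * kbase_gpu_mmap()/kbase_gpu_munmap(), address-space update/disable,
 * and the MMU interrupt and dump entry points.
 */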
int kbase_mmu_init(struct kbase_context *kctx);
void kbase_mmu_term(struct kbase_context *kctx);

phys_addr_t kbase_mmu_alloc_pgd(struct kbase_context *kctx);
void kbase_mmu_free_pgd(struct kbase_context *kctx);

int kbase_mmu_insert_pages(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t *phys, size_t nr,
		unsigned long flags);
int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 vpfn,
		phys_addr_t phys, size_t nr,
		unsigned long flags);

int kbase_mmu_teardown_pages(struct kbase_context *kctx, u64 vpfn, size_t nr);
int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, phys_addr_t *phys, size_t nr, unsigned long flags);

int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr, size_t nr_pages, size_t align);
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);

void kbase_mmu_update(struct kbase_context *kctx);
void kbase_mmu_disable(struct kbase_context *kctx);
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);

void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);

int kbase_sync_now(struct kbase_context *kctx, struct base_syncset *syncset);
void kbase_sync_single(struct kbase_context *kctx, phys_addr_t cpu_pa,
		phys_addr_t gpu_pa, off_t offset, size_t size,
		enum kbase_sync_type sync_fn);
void kbase_pre_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);
void kbase_post_job_sync(struct kbase_context *kctx, struct base_syncset *syncsets, size_t nr);

int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);

void kbase_os_mem_map_lock(struct kbase_context *kctx);
void kbase_os_mem_map_unlock(struct kbase_context *kctx);

void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);

static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, pages);
}

static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, 0 - pages);
}

int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx,
		u64 gpu_addr,
		unsigned long uaddr,
		size_t size,
		u64 *offset);

enum hrtimer_restart kbasep_as_poke_timer_callback(struct hrtimer *timer);

void kbase_as_poking_timer_retain_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);
void kbase_as_poking_timer_release_atom(struct kbase_device *kbdev, struct kbase_context *kctx, struct kbase_jd_atom *katom);

int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);
int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);
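
/*
 * The helpers below stash a page's DMA address in page->private.  When
 * dma_addr_t is wider than unsigned long (e.g. 32-bit kernels with
 * LPAE), the full address does not fit, so the page-aligned address is
 * stored shifted right by PAGE_SHIFT and shifted back on read; the
 * assert checks that only page-aligned addresses are stored in that
 * case.
 */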
static inline void kbase_set_dma_addr(struct page *p, dma_addr_t dma_addr)
{
	SetPagePrivate(p);
	if (sizeof(dma_addr_t) > sizeof(p->private)) {

		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
		set_page_private(p, dma_addr >> PAGE_SHIFT);
	} else {
		set_page_private(p, dma_addr);
	}
}

static inline dma_addr_t kbase_dma_addr(struct page *p)
{
	if (sizeof(dma_addr_t) > sizeof(p->private))
		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;

	return (dma_addr_t)page_private(p);
}

static inline void kbase_clear_dma_addr(struct page *p)
{
	ClearPagePrivate(p);
}

void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
		struct kbase_context *kctx, struct kbase_as *as);

void page_fault_worker(struct work_struct *data);
void bus_fault_worker(struct work_struct *data);
void kbase_flush_mmu_wqs(struct kbase_device *kbdev);

void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);
void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir);
void kbase_jit_debugfs_add(struct kbase_context *kctx);
int kbase_jit_init(struct kbase_context *kctx);
struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
		struct base_jit_alloc_info *info);
void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);
void kbase_jit_backing_lost(struct kbase_va_region *reg);
bool kbase_jit_evict(struct kbase_context *kctx);
void kbase_jit_term(struct kbase_context *kctx);
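
/*
 * External and "sticky" resources.  kbase_map_external_resource() maps
 * an imported region for GPU use, taking the caller's locked mm for
 * user-buffer imports and, when CONFIG_KDS is enabled, collecting the
 * KDS resources and access bitmap for the job;
 * kbase_unmap_external_resource() undoes it.  The sticky-resource
 * helpers track such mappings per context (acquire/release by GPU
 * address via a kbase_ctx_ext_res_meta handle, with init/term for the
 * per-context state).
 */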
struct kbase_mem_phy_alloc *kbase_map_external_resource(
		struct kbase_context *kctx, struct kbase_va_region *reg,
		struct mm_struct *locked_mm
#ifdef CONFIG_KDS
		, u32 *kds_res_count, struct kds_resource **kds_resources,
		unsigned long *kds_access_bitmap, bool exclusive
#endif
		);
void kbase_unmap_external_resource(struct kbase_context *kctx,
		struct kbase_va_region *reg, struct kbase_mem_phy_alloc *alloc);

int kbase_sticky_resource_init(struct kbase_context *kctx);
struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
		struct kbase_context *kctx, u64 gpu_addr);
bool kbase_sticky_resource_release(struct kbase_context *kctx,
		struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);
void kbase_sticky_resource_term(struct kbase_context *kctx);
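
/*
 * Zone cache helpers: build, incrementally update (from start_offset) or
 * clear the per-allocation zone_cache list attached to a
 * kbase_mem_phy_alloc.  The int-returning variants report whether the
 * cache could be (re)built.
 */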
int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
		size_t start_offset);
int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc);
void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc);

#endif /* _KBASE_MEM_H_ */