- // SPDX-License-Identifier: GPL-2.0-only
- /* binder_alloc.c
- *
- * Android IPC Subsystem
- *
- * Copyright (C) 2007-2017 Google, Inc.
- */
- #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
- #include <linux/list.h>
- #include <linux/sched/mm.h>
- #include <linux/module.h>
- #include <linux/rtmutex.h>
- #include <linux/rbtree.h>
- #include <linux/seq_file.h>
- #include <linux/vmalloc.h>
- #include <linux/slab.h>
- #include <linux/sched.h>
- #include <linux/list_lru.h>
- #include <linux/ratelimit.h>
- #include <asm/cacheflush.h>
- #include <linux/uaccess.h>
- #include <linux/highmem.h>
- #include <linux/sizes.h>
- #include "binder_alloc.h"
- #include "binder_trace.h"
- struct list_lru binder_alloc_lru;
- static DEFINE_MUTEX(binder_alloc_mmap_lock);
- enum {
- BINDER_DEBUG_USER_ERROR = 1U << 0,
- BINDER_DEBUG_OPEN_CLOSE = 1U << 1,
- BINDER_DEBUG_BUFFER_ALLOC = 1U << 2,
- BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3,
- };
- static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR;
- module_param_named(debug_mask, binder_alloc_debug_mask,
- uint, 0644);
- #define binder_alloc_debug(mask, x...) \
- do { \
- if (binder_alloc_debug_mask & mask) \
- pr_info_ratelimited(x); \
- } while (0)
- static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer)
- {
- return list_entry(buffer->entry.next, struct binder_buffer, entry);
- }
- static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer)
- {
- return list_entry(buffer->entry.prev, struct binder_buffer, entry);
- }
- static size_t binder_alloc_buffer_size(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
- {
- if (list_is_last(&buffer->entry, &alloc->buffers))
- return alloc->buffer + alloc->buffer_size - buffer->user_data;
- return binder_buffer_next(buffer)->user_data - buffer->user_data;
- }
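- /*
-  * The allocator keeps two rb-trees: alloc->free_buffers is keyed by
-  * buffer size, enabling the best-fit search in
-  * binder_alloc_new_buf_locked(), while alloc->allocated_buffers is
-  * keyed by user address, enabling the lookup in
-  * binder_alloc_prepare_to_free_locked().
-  */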
- static void binder_insert_free_buffer(struct binder_alloc *alloc,
- struct binder_buffer *new_buffer)
- {
- struct rb_node **p = &alloc->free_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- size_t buffer_size;
- size_t new_buffer_size;
- BUG_ON(!new_buffer->free);
- new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer);
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: add free buffer, size %zd, at %pK\n",
- alloc->pid, new_buffer_size, new_buffer);
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- if (new_buffer_size < buffer_size)
- p = &parent->rb_left;
- else
- p = &parent->rb_right;
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers);
- }
- static void binder_insert_allocated_buffer_locked(
- struct binder_alloc *alloc, struct binder_buffer *new_buffer)
- {
- struct rb_node **p = &alloc->allocated_buffers.rb_node;
- struct rb_node *parent = NULL;
- struct binder_buffer *buffer;
- BUG_ON(new_buffer->free);
- while (*p) {
- parent = *p;
- buffer = rb_entry(parent, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
- if (new_buffer->user_data < buffer->user_data)
- p = &parent->rb_left;
- else if (new_buffer->user_data > buffer->user_data)
- p = &parent->rb_right;
- else
- BUG();
- }
- rb_link_node(&new_buffer->rb_node, parent, p);
- rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers);
- }
- static struct binder_buffer *binder_alloc_prepare_to_free_locked(
- struct binder_alloc *alloc,
- uintptr_t user_ptr)
- {
- struct rb_node *n = alloc->allocated_buffers.rb_node;
- struct binder_buffer *buffer;
- void __user *uptr;
- uptr = (void __user *)user_ptr;
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(buffer->free);
- if (uptr < buffer->user_data)
- n = n->rb_left;
- else if (uptr > buffer->user_data)
- n = n->rb_right;
- else {
- /*
- * Guard against user threads attempting to
- * free the buffer while it is in use by the
- * kernel or after it has already been freed.
- */
- if (!buffer->allow_user_free)
- return ERR_PTR(-EPERM);
- buffer->allow_user_free = 0;
- return buffer;
- }
- }
- return NULL;
- }
- /**
- * binder_alloc_prepare_to_free() - get buffer given user ptr
- * @alloc: binder_alloc for this proc
- * @user_ptr: User pointer to buffer data
- *
- * Validate the userspace pointer to buffer data and return the buffer
- * matching that user pointer by searching the rb tree of allocated
- * buffers. On success the buffer is also marked as no longer
- * user-freeable, guarding against double frees.
- *
- * Return: Pointer to the matching buffer, NULL if no buffer matches,
- * or ERR_PTR(-EPERM) if the buffer is not allowed to be freed by the
- * user
- */
- struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc,
- uintptr_t user_ptr)
- {
- struct binder_buffer *buffer;
- mutex_lock(&alloc->mutex);
- buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr);
- mutex_unlock(&alloc->mutex);
- return buffer;
- }
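- /*
-  * Illustrative caller sketch (not part of this file; proc and
-  * data_ptr are assumed names): binder.c pairs this call with
-  * binder_alloc_free_buf() when handling BC_FREE_BUFFER. A NULL or
-  * ERR_PTR() result indicates an invalid or double free:
-  *
-  * buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr);
-  * if (IS_ERR_OR_NULL(buffer))
-  * return;
-  * ...tear down the transaction...
-  * binder_alloc_free_buf(&proc->alloc, buffer);
-  */
- /*
-  * binder_update_page_range() - allocate or free pages in a range
-  * @alloc: binder_alloc for this proc
-  * @allocate: 1 to allocate and map pages, 0 to return them to the
-  * binder_alloc_lru free list
-  * @start: start of the range (callers pass page-aligned addresses)
-  * @end: end of the range
-  */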
- static int binder_update_page_range(struct binder_alloc *alloc, int allocate,
- void __user *start, void __user *end)
- {
- void __user *page_addr;
- unsigned long user_page_addr;
- struct binder_lru_page *page;
- struct vm_area_struct *vma = NULL;
- struct mm_struct *mm = NULL;
- bool need_mm = false;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: %s pages %pK-%pK\n", alloc->pid,
- allocate ? "allocate" : "free", start, end);
- if (end <= start)
- return 0;
- trace_binder_update_page_range(alloc, allocate, start, end);
- if (allocate == 0)
- goto free_range;
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE];
- if (!page->page_ptr) {
- need_mm = true;
- break;
- }
- }
- if (need_mm && mmget_not_zero(alloc->vma_vm_mm))
- mm = alloc->vma_vm_mm;
- if (mm) {
- down_read(&mm->mmap_sem);
- vma = alloc->vma;
- }
- if (!vma && need_mm) {
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%d: binder_alloc_buf failed to map pages in userspace, no vma\n",
- alloc->pid);
- goto err_no_vma;
- }
- for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) {
- int ret;
- bool on_lru;
- size_t index;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
- if (page->page_ptr) {
- trace_binder_alloc_lru_start(alloc, index);
- on_lru = list_lru_del(&binder_alloc_lru, &page->lru);
- WARN_ON(!on_lru);
- trace_binder_alloc_lru_end(alloc, index);
- continue;
- }
- if (WARN_ON(!vma))
- goto err_page_ptr_cleared;
- trace_binder_alloc_page_start(alloc, index);
- page->page_ptr = alloc_page(GFP_KERNEL |
- __GFP_HIGHMEM |
- __GFP_ZERO);
- if (!page->page_ptr) {
- pr_err("%d: binder_alloc_buf failed for page at %pK\n",
- alloc->pid, page_addr);
- goto err_alloc_page_failed;
- }
- page->alloc = alloc;
- INIT_LIST_HEAD(&page->lru);
- user_page_addr = (uintptr_t)page_addr;
- ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr);
- if (ret) {
- pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n",
- alloc->pid, user_page_addr);
- goto err_vm_insert_page_failed;
- }
- if (index + 1 > alloc->pages_high)
- alloc->pages_high = index + 1;
- trace_binder_alloc_page_end(alloc, index);
- /* vm_insert_page() takes its own reference on the page */
- }
- if (mm) {
- up_read(&mm->mmap_sem);
- mmput(mm);
- }
- return 0;
- free_range:
- for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) {
- bool ret;
- size_t index;
- index = (page_addr - alloc->buffer) / PAGE_SIZE;
- page = &alloc->pages[index];
- trace_binder_free_lru_start(alloc, index);
- ret = list_lru_add(&binder_alloc_lru, &page->lru);
- WARN_ON(!ret);
- trace_binder_free_lru_end(alloc, index);
- if (page_addr == start)
- break;
- continue;
- err_vm_insert_page_failed:
- __free_page(page->page_ptr);
- page->page_ptr = NULL;
- err_alloc_page_failed:
- err_page_ptr_cleared:
- if (page_addr == start)
- break;
- }
- err_no_vma:
- if (mm) {
- up_read(&mm->mmap_sem);
- mmput(mm);
- }
- return vma ? -ENOMEM : -ESRCH;
- }
- static inline void binder_alloc_set_vma(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
- {
- if (vma)
- alloc->vma_vm_mm = vma->vm_mm;
- /*
- * If we see alloc->vma is non-NULL, the buffer data structures are
- * completely set up. This pairs with the smp_rmb() in
- * binder_alloc_get_vma(). We also want to guarantee that the new
- * alloc->vma_vm_mm is always visible if alloc->vma is set.
- */
- smp_wmb();
- alloc->vma = vma;
- }
- static inline struct vm_area_struct *binder_alloc_get_vma(
- struct binder_alloc *alloc)
- {
- struct vm_area_struct *vma = NULL;
- if (alloc->vma) {
- /* See the pairing smp_wmb() in binder_alloc_set_vma() */
- smp_rmb();
- vma = alloc->vma;
- }
- return vma;
- }
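- /*
-  * The two helpers above form a publish/consume pair (an ordering
-  * sketch, not driver code):
-  *
-  * writer (binder_alloc_set_vma):
-  * alloc->vma_vm_mm = vma->vm_mm;
-  * smp_wmb();
-  * alloc->vma = vma;
-  *
-  * reader (binder_alloc_get_vma):
-  * if (alloc->vma) {
-  * smp_rmb();
-  * vma = alloc->vma;
-  * }
-  *
-  * A reader that observes a non-NULL alloc->vma is thus guaranteed to
-  * also observe the earlier store to alloc->vma_vm_mm.
-  */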
- static struct binder_buffer *binder_alloc_new_buf_locked(
- struct binder_alloc *alloc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
- {
- struct rb_node *n = alloc->free_buffers.rb_node;
- struct binder_buffer *buffer;
- size_t buffer_size;
- struct rb_node *best_fit = NULL;
- void __user *has_page_addr;
- void __user *end_page_addr;
- size_t size, data_offsets_size;
- int ret;
- if (!binder_alloc_get_vma(alloc)) {
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%d: binder_alloc_buf, no vma\n",
- alloc->pid);
- return ERR_PTR(-ESRCH);
- }
- data_offsets_size = ALIGN(data_size, sizeof(void *)) +
- ALIGN(offsets_size, sizeof(void *));
- if (data_offsets_size < data_size || data_offsets_size < offsets_size) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: got transaction with invalid size %zd-%zd\n",
- alloc->pid, data_size, offsets_size);
- return ERR_PTR(-EINVAL);
- }
- size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *));
- if (size < data_offsets_size || size < extra_buffers_size) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: got transaction with invalid extra_buffers_size %zd\n",
- alloc->pid, extra_buffers_size);
- return ERR_PTR(-EINVAL);
- }
- if (is_async &&
- alloc->free_async_space < size + sizeof(struct binder_buffer)) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd failed, no async space left\n",
- alloc->pid, size);
- return ERR_PTR(-ENOSPC);
- }
- /* Pad 0-size buffers so they get assigned unique addresses */
- size = max(size, sizeof(void *));
- while (n) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- BUG_ON(!buffer->free);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- if (size < buffer_size) {
- best_fit = n;
- n = n->rb_left;
- } else if (size > buffer_size)
- n = n->rb_right;
- else {
- best_fit = n;
- break;
- }
- }
- if (best_fit == NULL) {
- size_t allocated_buffers = 0;
- size_t largest_alloc_size = 0;
- size_t total_alloc_size = 0;
- size_t free_buffers = 0;
- size_t largest_free_size = 0;
- size_t total_free_size = 0;
- for (n = rb_first(&alloc->allocated_buffers); n != NULL;
- n = rb_next(n)) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- allocated_buffers++;
- total_alloc_size += buffer_size;
- if (buffer_size > largest_alloc_size)
- largest_alloc_size = buffer_size;
- }
- for (n = rb_first(&alloc->free_buffers); n != NULL;
- n = rb_next(n)) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- free_buffers++;
- total_free_size += buffer_size;
- if (buffer_size > largest_free_size)
- largest_free_size = buffer_size;
- }
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%d: binder_alloc_buf size %zd failed, no address space\n",
- alloc->pid, size);
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n",
- total_alloc_size, allocated_buffers,
- largest_alloc_size, total_free_size,
- free_buffers, largest_free_size);
- return ERR_PTR(-ENOSPC);
- }
- if (n == NULL) {
- buffer = rb_entry(best_fit, struct binder_buffer, rb_node);
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- }
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n",
- alloc->pid, size, buffer, buffer_size);
- has_page_addr = (void __user *)
- (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK);
- WARN_ON(n && buffer_size != size);
- end_page_addr =
- (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size);
- if (end_page_addr > has_page_addr)
- end_page_addr = has_page_addr;
- ret = binder_update_page_range(alloc, 1, (void __user *)
- PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr);
- if (ret)
- return ERR_PTR(ret);
- if (buffer_size != size) {
- struct binder_buffer *new_buffer;
- new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!new_buffer) {
- pr_err("%s: %d failed to alloc new buffer struct\n",
- __func__, alloc->pid);
- goto err_alloc_buf_struct_failed;
- }
- new_buffer->user_data = (u8 __user *)buffer->user_data + size;
- list_add(&new_buffer->entry, &buffer->entry);
- new_buffer->free = 1;
- binder_insert_free_buffer(alloc, new_buffer);
- }
- rb_erase(best_fit, &alloc->free_buffers);
- buffer->free = 0;
- buffer->allow_user_free = 0;
- binder_insert_allocated_buffer_locked(alloc, buffer);
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_alloc_buf size %zd got %pK\n",
- alloc->pid, size, buffer);
- buffer->data_size = data_size;
- buffer->offsets_size = offsets_size;
- buffer->async_transaction = is_async;
- buffer->extra_buffers_size = extra_buffers_size;
- if (is_async) {
- alloc->free_async_space -= size + sizeof(struct binder_buffer);
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_alloc_buf size %zd async free %zd\n",
- alloc->pid, size, alloc->free_async_space);
- }
- return buffer;
- err_alloc_buf_struct_failed:
- binder_update_page_range(alloc, 0, (void __user *)
- PAGE_ALIGN((uintptr_t)buffer->user_data),
- end_page_addr);
- return ERR_PTR(-ENOMEM);
- }
- /**
- * binder_alloc_new_buf() - Allocate a new binder buffer
- * @alloc: binder_alloc for this proc
- * @data_size: size of user data buffer
- * @offsets_size: size of the area for user-specified offsets
- * @extra_buffers_size: size of extra space for meta-data (eg, security context)
- * @is_async: buffer for async transaction
- *
- * Allocate a new buffer given the requested sizes. Returns
- * the kernel version of the buffer pointer. The size allocated
- * is the sum of the three given sizes (each rounded up to
- * pointer-sized boundary)
- *
- * Return: The allocated buffer or %ERR_PTR(-errno) on error
- */
- struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
- size_t data_size,
- size_t offsets_size,
- size_t extra_buffers_size,
- int is_async)
- {
- struct binder_buffer *buffer;
- mutex_lock(&alloc->mutex);
- buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size,
- extra_buffers_size, is_async);
- mutex_unlock(&alloc->mutex);
- return buffer;
- }
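- /*
-  * Illustrative caller sketch (t, tr and target_proc are assumed
-  * locals, mirroring the shape of binder_transaction() in binder.c):
-  *
-  * t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
-  * tr->offsets_size, extra_buffers_size,
-  * !!(t->flags & TF_ONE_WAY));
-  * if (IS_ERR(t->buffer)) {
-  * ret = PTR_ERR(t->buffer);
-  * t->buffer = NULL;
-  * goto err_binder_alloc_buf_failed;
-  * }
-  */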
- static void __user *buffer_start_page(struct binder_buffer *buffer)
- {
- return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK);
- }
- static void __user *prev_buffer_end_page(struct binder_buffer *buffer)
- {
- return (void __user *)
- (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK);
- }
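- /*
-  * A freed buffer's backing page can only be released when the page
-  * holding its start address is not shared with the previous buffer's
-  * tail or the next buffer's head. Sketch (| marks page boundaries):
-  *
-  * | prev ....... buffer ...... next |
-  *
-  * Here buffer shares its start page with both neighbours, so only
-  * its list entry and struct are deleted; the page stays mapped.
-  */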
- static void binder_delete_free_buffer(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
- {
- struct binder_buffer *prev, *next = NULL;
- bool to_free = true;
- BUG_ON(alloc->buffers.next == &buffer->entry);
- prev = binder_buffer_prev(buffer);
- BUG_ON(!prev->free);
- if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) {
- to_free = false;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid, buffer->user_data,
- prev->user_data);
- }
- if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- next = binder_buffer_next(buffer);
- if (buffer_start_page(next) == buffer_start_page(buffer)) {
- to_free = false;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK share page with %pK\n",
- alloc->pid,
- buffer->user_data,
- next->user_data);
- }
- }
- if (PAGE_ALIGNED(buffer->user_data)) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer start %pK is page aligned\n",
- alloc->pid, buffer->user_data);
- to_free = false;
- }
- if (to_free) {
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: merge free, buffer %pK do not share page with %pK or %pK\n",
- alloc->pid, buffer->user_data,
- prev->user_data,
- next ? next->user_data : NULL);
- binder_update_page_range(alloc, 0, buffer_start_page(buffer),
- buffer_start_page(buffer) + PAGE_SIZE);
- }
- list_del(&buffer->entry);
- kfree(buffer);
- }
- static void binder_free_buf_locked(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
- {
- size_t size, buffer_size;
- buffer_size = binder_alloc_buffer_size(alloc, buffer);
- size = ALIGN(buffer->data_size, sizeof(void *)) +
- ALIGN(buffer->offsets_size, sizeof(void *)) +
- ALIGN(buffer->extra_buffers_size, sizeof(void *));
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%d: binder_free_buf %pK size %zd buffer_size %zd\n",
- alloc->pid, buffer, size, buffer_size);
- BUG_ON(buffer->free);
- BUG_ON(size > buffer_size);
- BUG_ON(buffer->transaction != NULL);
- BUG_ON(buffer->user_data < alloc->buffer);
- BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size);
- if (buffer->async_transaction) {
- alloc->free_async_space += size + sizeof(struct binder_buffer);
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC,
- "%d: binder_free_buf size %zd async free %zd\n",
- alloc->pid, size, alloc->free_async_space);
- }
- binder_update_page_range(alloc, 0,
- (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data),
- (void __user *)(((uintptr_t)
- buffer->user_data + buffer_size) & PAGE_MASK));
- rb_erase(&buffer->rb_node, &alloc->allocated_buffers);
- buffer->free = 1;
- if (!list_is_last(&buffer->entry, &alloc->buffers)) {
- struct binder_buffer *next = binder_buffer_next(buffer);
- if (next->free) {
- rb_erase(&next->rb_node, &alloc->free_buffers);
- binder_delete_free_buffer(alloc, next);
- }
- }
- if (alloc->buffers.next != &buffer->entry) {
- struct binder_buffer *prev = binder_buffer_prev(buffer);
- if (prev->free) {
- binder_delete_free_buffer(alloc, buffer);
- rb_erase(&prev->rb_node, &alloc->free_buffers);
- buffer = prev;
- }
- }
- binder_insert_free_buffer(alloc, buffer);
- }
- static void binder_alloc_clear_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffer);
- /**
- * binder_alloc_free_buf() - free a binder buffer
- * @alloc: binder_alloc for this proc
- * @buffer: kernel pointer to buffer
- *
- * Free the buffer allocated via binder_alloc_new_buf()
- */
- void binder_alloc_free_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
- {
- /*
- * We could eliminate the call to binder_alloc_clear_buf()
- * from binder_alloc_deferred_release() by moving this to
- * binder_alloc_free_buf_locked(). However, that could
- * increase contention for the alloc mutex if clear_on_free
- * is used frequently for large buffers. The mutex is not
- * needed for correctness here.
- */
- if (buffer->clear_on_free) {
- binder_alloc_clear_buf(alloc, buffer);
- buffer->clear_on_free = false;
- }
- mutex_lock(&alloc->mutex);
- binder_free_buf_locked(alloc, buffer);
- mutex_unlock(&alloc->mutex);
- }
- /**
- * binder_alloc_mmap_handler() - map virtual address space for proc
- * @alloc: alloc structure for this proc
- * @vma: vma passed to mmap()
- *
- * Called by binder_mmap() to initialize the space specified in
- * vma for allocating binder buffers
- *
- * Return:
- * 0 = success
- * -EBUSY = address space already mapped
- * -ENOMEM = failed to map memory to given address space
- */
- int binder_alloc_mmap_handler(struct binder_alloc *alloc,
- struct vm_area_struct *vma)
- {
- int ret;
- const char *failure_string;
- struct binder_buffer *buffer;
- mutex_lock(&binder_alloc_mmap_lock);
- if (alloc->buffer_size) {
- ret = -EBUSY;
- failure_string = "already mapped";
- goto err_already_mapped;
- }
- alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start,
- SZ_4M);
- mutex_unlock(&binder_alloc_mmap_lock);
- alloc->buffer = (void __user *)vma->vm_start;
- alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE,
- sizeof(alloc->pages[0]),
- GFP_KERNEL);
- if (alloc->pages == NULL) {
- ret = -ENOMEM;
- failure_string = "alloc page array";
- goto err_alloc_pages_failed;
- }
- buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
- if (!buffer) {
- ret = -ENOMEM;
- failure_string = "alloc buffer struct";
- goto err_alloc_buf_struct_failed;
- }
- buffer->user_data = alloc->buffer;
- list_add(&buffer->entry, &alloc->buffers);
- buffer->free = 1;
- binder_insert_free_buffer(alloc, buffer);
- alloc->free_async_space = alloc->buffer_size / 2;
- binder_alloc_set_vma(alloc, vma);
- mmgrab(alloc->vma_vm_mm);
- return 0;
- err_alloc_buf_struct_failed:
- kfree(alloc->pages);
- alloc->pages = NULL;
- err_alloc_pages_failed:
- alloc->buffer = NULL;
- mutex_lock(&binder_alloc_mmap_lock);
- alloc->buffer_size = 0;
- err_already_mapped:
- mutex_unlock(&binder_alloc_mmap_lock);
- binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
- "%s: %d %lx-%lx %s failed %d\n", __func__,
- alloc->pid, vma->vm_start, vma->vm_end,
- failure_string, ret);
- return ret;
- }
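- /*
-  * Illustrative caller sketch (a sketch of binder_mmap() in binder.c;
-  * binder_vm_ops and proc are assumed names). The vma is made
-  * non-writable and non-copyable before being handed over:
-  *
-  * vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
-  * vma->vm_flags &= ~VM_MAYWRITE;
-  * vma->vm_ops = &binder_vm_ops;
-  * vma->vm_private_data = proc;
-  * return binder_alloc_mmap_handler(&proc->alloc, vma);
-  */
- /*
-  * binder_alloc_deferred_release() - free all buffers and pages
-  * @alloc: binder_alloc for this proc
-  *
-  * Called when the binder proc is destroyed. Frees any remaining
-  * buffers and pages and drops the mm reference taken in
-  * binder_alloc_mmap_handler(). The vma must already be gone.
-  */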
- void binder_alloc_deferred_release(struct binder_alloc *alloc)
- {
- struct rb_node *n;
- int buffers, page_count;
- struct binder_buffer *buffer;
- buffers = 0;
- mutex_lock(&alloc->mutex);
- BUG_ON(alloc->vma);
- while ((n = rb_first(&alloc->allocated_buffers))) {
- buffer = rb_entry(n, struct binder_buffer, rb_node);
- /* Transaction should already have been freed */
- BUG_ON(buffer->transaction);
- if (buffer->clear_on_free) {
- binder_alloc_clear_buf(alloc, buffer);
- buffer->clear_on_free = false;
- }
- binder_free_buf_locked(alloc, buffer);
- buffers++;
- }
- while (!list_empty(&alloc->buffers)) {
- buffer = list_first_entry(&alloc->buffers,
- struct binder_buffer, entry);
- WARN_ON(!buffer->free);
- list_del(&buffer->entry);
- WARN_ON_ONCE(!list_empty(&alloc->buffers));
- kfree(buffer);
- }
- page_count = 0;
- if (alloc->pages) {
- int i;
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- void __user *page_addr;
- bool on_lru;
- if (!alloc->pages[i].page_ptr)
- continue;
- on_lru = list_lru_del(&binder_alloc_lru,
- &alloc->pages[i].lru);
- page_addr = alloc->buffer + i * PAGE_SIZE;
- binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC,
- "%s: %d: page %d at %pK %s\n",
- __func__, alloc->pid, i, page_addr,
- on_lru ? "on lru" : "active");
- __free_page(alloc->pages[i].page_ptr);
- page_count++;
- }
- kfree(alloc->pages);
- }
- mutex_unlock(&alloc->mutex);
- if (alloc->vma_vm_mm)
- mmdrop(alloc->vma_vm_mm);
- binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE,
- "%s: %d buffers %d, pages %d\n",
- __func__, alloc->pid, buffers, page_count);
- }
- static void print_binder_buffer(struct seq_file *m, const char *prefix,
- struct binder_buffer *buffer)
- {
- seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n",
- prefix, buffer->debug_id, buffer->user_data,
- buffer->data_size, buffer->offsets_size,
- buffer->extra_buffers_size,
- buffer->transaction ? "active" : "delivered");
- }
- /**
- * binder_alloc_print_allocated() - print buffer info
- * @m: seq_file for output via seq_printf()
- * @alloc: binder_alloc for this proc
- *
- * Prints information about every buffer associated with
- * the binder_alloc state to the given seq_file
- */
- void binder_alloc_print_allocated(struct seq_file *m,
- struct binder_alloc *alloc)
- {
- struct rb_node *n;
- mutex_lock(&alloc->mutex);
- for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
- print_binder_buffer(m, " buffer",
- rb_entry(n, struct binder_buffer, rb_node));
- mutex_unlock(&alloc->mutex);
- }
- /**
- * binder_alloc_print_pages() - print page usage
- * @m: seq_file for output via seq_printf()
- * @alloc: binder_alloc for this proc
- */
- void binder_alloc_print_pages(struct seq_file *m,
- struct binder_alloc *alloc)
- {
- struct binder_lru_page *page;
- int i;
- int active = 0;
- int lru = 0;
- int free = 0;
- mutex_lock(&alloc->mutex);
- /*
- * Make sure the binder_alloc is fully initialized, otherwise we might
- * read inconsistent state.
- */
- if (binder_alloc_get_vma(alloc) != NULL) {
- for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) {
- page = &alloc->pages[i];
- if (!page->page_ptr)
- free++;
- else if (list_empty(&page->lru))
- active++;
- else
- lru++;
- }
- }
- mutex_unlock(&alloc->mutex);
- seq_printf(m, " pages: %d:%d:%d\n", active, lru, free);
- seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high);
- }
- /**
- * binder_alloc_get_allocated_count() - return count of buffers
- * @alloc: binder_alloc for this proc
- *
- * Return: count of allocated buffers
- */
- int binder_alloc_get_allocated_count(struct binder_alloc *alloc)
- {
- struct rb_node *n;
- int count = 0;
- mutex_lock(&alloc->mutex);
- for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n))
- count++;
- mutex_unlock(&alloc->mutex);
- return count;
- }
- /**
- * binder_alloc_vma_close() - invalidate address space
- * @alloc: binder_alloc for this proc
- *
- * Called from binder_vma_close() when releasing address space.
- * Clears alloc->vma to prevent new incoming transactions from
- * allocating more buffers.
- */
- void binder_alloc_vma_close(struct binder_alloc *alloc)
- {
- binder_alloc_set_vma(alloc, NULL);
- }
- /**
- * binder_alloc_free_page() - shrinker callback to free pages
- * @item: item to free
- * @lru: list_lru instance containing @item
- * @lock: lock protecting @lru
- * @cb_arg: callback argument
- *
- * Called from list_lru_walk() in binder_shrink_scan() to free
- * up pages when the system is under memory pressure.
- */
- enum lru_status binder_alloc_free_page(struct list_head *item,
- struct list_lru_one *lru,
- spinlock_t *lock,
- void *cb_arg)
- __must_hold(lock)
- {
- struct mm_struct *mm = NULL;
- struct binder_lru_page *page = container_of(item,
- struct binder_lru_page,
- lru);
- struct binder_alloc *alloc;
- uintptr_t page_addr;
- size_t index;
- struct vm_area_struct *vma;
- alloc = page->alloc;
- if (!mutex_trylock(&alloc->mutex))
- goto err_get_alloc_mutex_failed;
- if (!page->page_ptr)
- goto err_page_already_freed;
- index = page - alloc->pages;
- page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
- mm = alloc->vma_vm_mm;
- if (!mmget_not_zero(mm))
- goto err_mmget;
- if (!down_read_trylock(&mm->mmap_sem))
- goto err_down_read_mmap_sem_failed;
- vma = binder_alloc_get_vma(alloc);
- list_lru_isolate(lru, item);
- spin_unlock(lock);
- if (vma) {
- trace_binder_unmap_user_start(alloc, index);
- zap_page_range(vma, page_addr, PAGE_SIZE);
- trace_binder_unmap_user_end(alloc, index);
- }
- up_read(&mm->mmap_sem);
- mmput_async(mm);
- trace_binder_unmap_kernel_start(alloc, index);
- __free_page(page->page_ptr);
- page->page_ptr = NULL;
- trace_binder_unmap_kernel_end(alloc, index);
- spin_lock(lock);
- mutex_unlock(&alloc->mutex);
- return LRU_REMOVED_RETRY;
- err_down_read_mmap_sem_failed:
- mmput_async(mm);
- err_mmget:
- err_page_already_freed:
- mutex_unlock(&alloc->mutex);
- err_get_alloc_mutex_failed:
- return LRU_SKIP;
- }
- static unsigned long
- binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
- {
- unsigned long ret = list_lru_count(&binder_alloc_lru);
- return ret;
- }
- static unsigned long
- binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
- {
- unsigned long ret;
- ret = list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
- NULL, sc->nr_to_scan);
- return ret;
- }
- static struct shrinker binder_shrinker = {
- .count_objects = binder_shrink_count,
- .scan_objects = binder_shrink_scan,
- .seeks = DEFAULT_SEEKS,
- };
- /**
- * binder_alloc_init() - called by binder_open() for per-proc initialization
- * @alloc: binder_alloc for this proc
- *
- * Called from binder_open() to initialize binder_alloc fields for
- * new binder proc
- */
- void binder_alloc_init(struct binder_alloc *alloc)
- {
- alloc->pid = current->group_leader->pid;
- mutex_init(&alloc->mutex);
- INIT_LIST_HEAD(&alloc->buffers);
- }
- int binder_alloc_shrinker_init(void)
- {
- int ret = list_lru_init(&binder_alloc_lru);
- if (ret == 0) {
- ret = register_shrinker(&binder_shrinker);
- if (ret)
- list_lru_destroy(&binder_alloc_lru);
- }
- return ret;
- }
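- /*
-  * Illustrative init-time usage (a sketch; the real call site is
-  * binder_init() in binder.c):
-  *
-  * ret = binder_alloc_shrinker_init();
-  * if (ret)
-  * return ret;
-  */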
- /**
- * check_buffer() - verify that buffer/offset is safe to access
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @offset: offset into @buffer data
- * @bytes: bytes to access from offset
- *
- * Check that the @offset/@bytes are within the size of the given
- * @buffer and that the buffer is currently active and not freeable.
- * Offsets must also be multiples of sizeof(u32). The kernel is
- * allowed to touch the buffer in two cases:
- *
- * 1) when the buffer is being created:
- * (buffer->free == 0 && buffer->allow_user_free == 0)
- * 2) when the buffer is being torn down:
- * (buffer->free == 0 && buffer->transaction == NULL).
- *
- * Return: true if the buffer is safe to access
- */
- static inline bool check_buffer(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t offset, size_t bytes)
- {
- size_t buffer_size = binder_alloc_buffer_size(alloc, buffer);
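- /*
-  * Ordering note: "offset <= buffer_size - bytes" is evaluated only
-  * after "buffer_size >= bytes" has held, so the subtraction cannot
-  * wrap. The naive form "offset + bytes <= buffer_size" could wrap
-  * around for a large @offset or @bytes.
-  */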
- return buffer_size >= bytes &&
- offset <= buffer_size - bytes &&
- IS_ALIGNED(offset, sizeof(u32)) &&
- !buffer->free &&
- (!buffer->allow_user_free || !buffer->transaction);
- }
- /**
- * binder_alloc_get_page() - get kernel pointer for given buffer offset
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @pgoffp: address to copy final page offset to
- *
- * Lookup the struct page corresponding to the address
- * at @buffer_offset into @buffer->user_data. The byte offset into
- * that page is written to @pgoffp, which must not be NULL.
- *
- * The caller is responsible for ensuring that the offset points
- * to a valid address within the @buffer and that @buffer is
- * not freeable by the user. Since it can't be freed, we are
- * guaranteed that the corresponding elements of @alloc->pages[]
- * cannot change.
- *
- * Return: struct page
- */
- static struct page *binder_alloc_get_page(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- pgoff_t *pgoffp)
- {
- binder_size_t buffer_space_offset = buffer_offset +
- (buffer->user_data - alloc->buffer);
- pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK;
- size_t index = buffer_space_offset >> PAGE_SHIFT;
- struct binder_lru_page *lru_page;
- lru_page = &alloc->pages[index];
- *pgoffp = pgoff;
- return lru_page->page_ptr;
- }
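- /*
-  * Worked example of the arithmetic above (illustrative numbers,
-  * assuming PAGE_SIZE == 4096): for a buffer whose user_data starts
-  * 0x1800 bytes into the mapping, buffer_offset == 0x900 gives
-  * buffer_space_offset == 0x2100, hence index == 2 and pgoff == 0x100.
-  */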
- /**
- * binder_alloc_clear_buf() - zero out buffer
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be cleared
- *
- * memset the given buffer to 0
- */
- static void binder_alloc_clear_buf(struct binder_alloc *alloc,
- struct binder_buffer *buffer)
- {
- size_t bytes = binder_alloc_buffer_size(alloc, buffer);
- binder_size_t buffer_offset = 0;
- while (bytes) {
- unsigned long size;
- struct page *page;
- pgoff_t pgoff;
- void *kptr;
- page = binder_alloc_get_page(alloc, buffer,
- buffer_offset, &pgoff);
- size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
- memset(kptr, 0, size);
- kunmap(page);
- bytes -= size;
- buffer_offset += size;
- }
- }
- /**
- * binder_alloc_copy_user_to_buffer() - copy src user to tgt user
- * @alloc: binder_alloc for this proc
- * @buffer: binder buffer to be accessed
- * @buffer_offset: offset into @buffer data
- * @from: userspace pointer to source buffer
- * @bytes: bytes to copy
- *
- * Copy bytes from source userspace to target buffer.
- *
- * Return: bytes remaining to be copied
- */
- unsigned long
- binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- const void __user *from,
- size_t bytes)
- {
- if (!check_buffer(alloc, buffer, buffer_offset, bytes))
- return bytes;
- while (bytes) {
- unsigned long size;
- unsigned long ret;
- struct page *page;
- pgoff_t pgoff;
- void *kptr;
- page = binder_alloc_get_page(alloc, buffer,
- buffer_offset, &pgoff);
- size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- kptr = kmap(page) + pgoff;
- ret = copy_from_user(kptr, from, size);
- kunmap(page);
- if (ret)
- return bytes - size + ret;
- bytes -= size;
- from += size;
- buffer_offset += size;
- }
- return 0;
- }
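- /*
-  * Illustrative caller sketch (t, tr and target_proc are assumed
-  * names, following the binder_transaction() pattern of treating a
-  * nonzero return like a failed copy_from_user()):
-  *
-  * if (binder_alloc_copy_user_to_buffer(&target_proc->alloc,
-  * t->buffer, 0,
-  * (const void __user *)(uintptr_t)tr->data.ptr.buffer,
-  * tr->data_size))
-  * return -EFAULT;
-  */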
- static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc,
- bool to_buffer,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- void *ptr,
- size_t bytes)
- {
- /* All copies must be 32-bit aligned and 32-bit size */
- if (!check_buffer(alloc, buffer, buffer_offset, bytes))
- return -EINVAL;
- while (bytes) {
- unsigned long size;
- struct page *page;
- pgoff_t pgoff;
- void *tmpptr;
- void *base_ptr;
- page = binder_alloc_get_page(alloc, buffer,
- buffer_offset, &pgoff);
- size = min_t(size_t, bytes, PAGE_SIZE - pgoff);
- base_ptr = kmap_atomic(page);
- tmpptr = base_ptr + pgoff;
- if (to_buffer)
- memcpy(tmpptr, ptr, size);
- else
- memcpy(ptr, tmpptr, size);
- /*
- * kunmap_atomic() takes care of flushing the cache
- * on architectures with a VIVT cache
- */
- kunmap_atomic(base_ptr);
- bytes -= size;
- pgoff = 0;
- ptr = ptr + size;
- buffer_offset += size;
- }
- return 0;
- }
- int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- void *src,
- size_t bytes)
- {
- return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset,
- src, bytes);
- }
- int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
- void *dest,
- struct binder_buffer *buffer,
- binder_size_t buffer_offset,
- size_t bytes)
- {
- return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset,
- dest, bytes);
- }
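- /*
-  * Illustrative usage of the two wrappers above (a sketch; buffer and
-  * object_offset are assumed names): binder.c uses them to read and
-  * modify fixed-size objects at 32-bit aligned offsets, e.g.:
-  *
-  * struct flat_binder_object object;
-  *
-  * if (binder_alloc_copy_from_buffer(&proc->alloc, &object, buffer,
-  * object_offset, sizeof(object)))
-  * return -EINVAL;
-  * ...modify object...
-  * if (binder_alloc_copy_to_buffer(&proc->alloc, buffer, object_offset,
-  * &object, sizeof(object)))
-  * return -EINVAL;
-  */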