#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task_stack.h>
#include <linux/security.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/userfaultfd_k.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

#include "internal.h"
static inline int is_kernel_rodata(unsigned long addr)
{
        return addr >= (unsigned long)__start_rodata &&
               addr < (unsigned long)__end_rodata;
}
/**
 * kfree_const - conditionally free memory
 * @x: pointer to the memory
 *
 * This function calls kfree() only if @x does not point into the kernel's
 * .rodata section.
 */
void kfree_const(const void *x)
{
        if (!is_kernel_rodata((unsigned long)x))
                kfree(x);
}
EXPORT_SYMBOL(kfree_const);
/**
 * kstrdup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kstrdup(const char *s, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strlen(s) + 1;
        buf = kmalloc_track_caller(len, gfp);
        if (buf)
                memcpy(buf, s, len);
        return buf;
}
EXPORT_SYMBOL(kstrdup);
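
/*
 * Example usage (an illustrative sketch, not part of this file; the
 * names my_label and my_set_label are hypothetical): duplicating a
 * caller-supplied string so it outlives the caller's buffer.
 *
 *      static char *my_label;
 *
 *      static int my_set_label(const char *src)
 *      {
 *              char *copy = kstrdup(src, GFP_KERNEL);
 *
 *              if (!copy)
 *                      return -ENOMEM;
 *              kfree(my_label);
 *              my_label = copy;
 *              return 0;
 *      }
 */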
/**
 * kstrdup_const - conditionally duplicate an existing const string
 * @s: the string to duplicate
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Returns the source string if it is in the .rodata section; otherwise it
 * falls back to kstrdup().
 * Strings allocated by kstrdup_const() should be freed by kfree_const().
 */
const char *kstrdup_const(const char *s, gfp_t gfp)
{
        if (is_kernel_rodata((unsigned long)s))
                return s;

        return kstrdup(s, gfp);
}
EXPORT_SYMBOL(kstrdup_const);
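
/*
 * Example usage (sketch; the variable name is hypothetical): a string
 * literal lives in .rodata, so kstrdup_const() returns it unchanged and
 * the matching kfree_const() becomes a no-op. A heap-allocated source
 * is duplicated and freed as with kstrdup()/kfree().
 *
 *      const char *name = kstrdup_const("fixed-name", GFP_KERNEL);
 *
 *      if (!name)
 *              return -ENOMEM;
 *      ...
 *      kfree_const(name);
 */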
/**
 * kstrndup - allocate space for and copy an existing string
 * @s: the string to duplicate
 * @max: read at most @max chars from @s
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 *
 * Note: Use kmemdup_nul() instead if the size is known exactly.
 */
char *kstrndup(const char *s, size_t max, gfp_t gfp)
{
        size_t len;
        char *buf;

        if (!s)
                return NULL;

        len = strnlen(s, max);
        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kstrndup);
/**
 * kmemdup - duplicate region of memory
 *
 * @src: memory region to duplicate
 * @len: memory region length
 * @gfp: GFP mask to use
 */
void *kmemdup(const void *src, size_t len, gfp_t gfp)
{
        void *p;

        p = kmalloc_track_caller(len, gfp);
        if (p)
                memcpy(p, src, len);
        return p;
}
EXPORT_SYMBOL(kmemdup);
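
/*
 * Example usage (sketch; struct my_cfg is hypothetical): taking a
 * private snapshot of a caller-provided structure.
 *
 *      struct my_cfg *snap = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
 *
 *      if (!snap)
 *              return -ENOMEM;
 */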
/**
 * kmemdup_nul - Create a NUL-terminated string from unterminated data
 * @s: The data to stringify
 * @len: The size of the data
 * @gfp: the GFP mask used in the kmalloc() call when allocating memory
 */
char *kmemdup_nul(const char *s, size_t len, gfp_t gfp)
{
        char *buf;

        if (!s)
                return NULL;

        buf = kmalloc_track_caller(len + 1, gfp);
        if (buf) {
                memcpy(buf, s, len);
                buf[len] = '\0';
        }
        return buf;
}
EXPORT_SYMBOL(kmemdup_nul);
/**
 * memdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure. Result is physically
 * contiguous, to be freed by kfree().
 */
void *memdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kmalloc_track_caller(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(memdup_user);
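
/*
 * Example usage (sketch of a typical ioctl handler; all names are
 * hypothetical):
 *
 *      static long my_ioctl(struct file *f, unsigned int cmd,
 *                           unsigned long arg)
 *      {
 *              struct my_req *req;
 *
 *              req = memdup_user((void __user *)arg, sizeof(*req));
 *              if (IS_ERR(req))
 *                      return PTR_ERR(req);
 *              ... use req ...
 *              kfree(req);
 *              return 0;
 *      }
 */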
/**
 * vmemdup_user - duplicate memory region from user space
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure. The result may not be physically
 * contiguous; use kvfree() to free it.
 */
void *vmemdup_user(const void __user *src, size_t len)
{
        void *p;

        p = kvmalloc(len, GFP_USER);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kvfree(p);
                return ERR_PTR(-EFAULT);
        }

        return p;
}
EXPORT_SYMBOL(vmemdup_user);
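
/*
 * Example usage (sketch): same pattern as memdup_user(), but suitable
 * for sizes that may be too large for kmalloc(). Note kvfree(), not
 * kfree():
 *
 *      void *buf = vmemdup_user(uptr, len);
 *
 *      if (IS_ERR(buf))
 *              return PTR_ERR(buf);
 *      ...
 *      kvfree(buf);
 */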
/**
 * strndup_user - duplicate an existing string from user space
 * @s: The string to duplicate
 * @n: Maximum number of bytes to copy, including the trailing NUL.
 */
char *strndup_user(const char __user *s, long n)
{
        char *p;
        long length;

        length = strnlen_user(s, n);

        if (!length)
                return ERR_PTR(-EFAULT);

        if (length > n)
                return ERR_PTR(-EINVAL);

        p = memdup_user(s, length);

        if (IS_ERR(p))
                return p;

        p[length - 1] = '\0';

        return p;
}
EXPORT_SYMBOL(strndup_user);
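
/*
 * Example usage (sketch): copying a user-supplied path with a hard
 * upper bound, as syscall code commonly does:
 *
 *      char *path = strndup_user(upath, PATH_MAX);
 *
 *      if (IS_ERR(path))
 *              return PTR_ERR(path);
 *      ...
 *      kfree(path);
 */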
/**
 * memdup_user_nul - duplicate memory region from user space and NUL-terminate
 *
 * @src: source address in user space
 * @len: number of bytes to copy
 *
 * Returns an ERR_PTR() on failure.
 */
void *memdup_user_nul(const void __user *src, size_t len)
{
        char *p;

        /*
         * Always use GFP_KERNEL, since copy_from_user() can sleep and
         * cause pagefault, which makes it pointless to use GFP_NOFS
         * or GFP_ATOMIC.
         */
        p = kmalloc_track_caller(len + 1, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        if (copy_from_user(p, src, len)) {
                kfree(p);
                return ERR_PTR(-EFAULT);
        }
        p[len] = '\0';

        return p;
}
EXPORT_SYMBOL(memdup_user_nul);
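
/*
 * Example usage (sketch of a procfs-style write handler; names are
 * hypothetical). User data carries no terminator, so it is terminated
 * before parsing:
 *
 *      static ssize_t my_write(struct file *f, const char __user *ubuf,
 *                              size_t count, loff_t *ppos)
 *      {
 *              char *kbuf = memdup_user_nul(ubuf, count);
 *
 *              if (IS_ERR(kbuf))
 *                      return PTR_ERR(kbuf);
 *              ... parse kbuf, e.g. with kstrtoul() ...
 *              kfree(kbuf);
 *              return count;
 *      }
 */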
void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma,
                struct vm_area_struct *prev, struct rb_node *rb_parent)
{
        struct vm_area_struct *next;

        vma->vm_prev = prev;
        if (prev) {
                next = prev->vm_next;
                prev->vm_next = vma;
        } else {
                mm->mmap = vma;
                if (rb_parent)
                        next = rb_entry(rb_parent,
                                        struct vm_area_struct, vm_rb);
                else
                        next = NULL;
        }
        vma->vm_next = next;
        if (next)
                next->vm_prev = vma;
}
/* Check if the vma is being used as a stack by this task */
int vma_is_stack_for_current(struct vm_area_struct *vma)
{
        struct task_struct * __maybe_unused t = current;

        return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t));
}
#if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT)
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
        mm->mmap_base = TASK_UNMAPPED_BASE;
        mm->get_unmapped_area = arch_get_unmapped_area;
}
#endif
/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 * Note a difference from get_user_pages_fast(): this always returns the
 * number of pages pinned, 0 if no pages were pinned.
 * If the architecture does not support this function, simply return with no
 * pages pinned.
 */
int __weak __get_user_pages_fast(unsigned long start,
                                 int nr_pages, int write, struct page **pages)
{
        return 0;
}
EXPORT_SYMBOL_GPL(__get_user_pages_fast);
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start: starting user address
 * @nr_pages: number of pages from start to pin
 * @write: whether pages will be written to
 * @pages: array that receives pointers to the pages pinned.
 *         Should be at least nr_pages long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 *
 * get_user_pages_fast provides equivalent functionality to get_user_pages,
 * operating on current and current->mm, with force=0 and vma=NULL. However
 * unlike get_user_pages, it must be called without mmap_sem held.
 *
 * get_user_pages_fast may take mmap_sem and page table locks, so no
 * assumptions can be made about lack of locking. get_user_pages_fast is to be
 * implemented in a way that is advantageous (vs get_user_pages()) when the
 * user memory area is already faulted in and present in ptes. However if the
 * pages have to be faulted in, it may turn out to be slightly slower so
 * callers need to carefully consider what to use. On many architectures,
 * get_user_pages_fast simply falls back to get_user_pages.
 */
int __weak get_user_pages_fast(unsigned long start,
                               int nr_pages, int write, struct page **pages)
{
        return get_user_pages_unlocked(start, nr_pages, pages,
                                       write ? FOLL_WRITE : 0);
}
EXPORT_SYMBOL_GPL(get_user_pages_fast);
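
/*
 * Example usage (sketch; error handling abbreviated): pinning a user
 * buffer before accessing it. The third argument (1) requests write
 * access:
 *
 *      struct page *pages[16];
 *      int nr;
 *
 *      nr = get_user_pages_fast(uaddr, 16, 1, pages);
 *      if (nr <= 0)
 *              return nr ? nr : -EFAULT;
 *      ... access the pages, then drop the references ...
 *      while (nr--)
 *              put_page(pages[nr]);
 */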
unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long pgoff)
{
        unsigned long ret;
        struct mm_struct *mm = current->mm;
        unsigned long populate;
        LIST_HEAD(uf);

        ret = security_mmap_file(file, prot, flag);
        if (!ret) {
                if (down_write_killable(&mm->mmap_sem))
                        return -EINTR;
                ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff,
                                    &populate, &uf);
                up_write(&mm->mmap_sem);
                userfaultfd_unmap_complete(mm, &uf);
                if (populate)
                        mm_populate(ret, populate);
        }
        return ret;
}
unsigned long vm_mmap(struct file *file, unsigned long addr,
        unsigned long len, unsigned long prot,
        unsigned long flag, unsigned long offset)
{
        if (unlikely(offset + PAGE_ALIGN(len) < offset))
                return -EINVAL;
        if (unlikely(offset_in_page(offset)))
                return -EINVAL;

        return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
}
EXPORT_SYMBOL(vm_mmap);
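
/*
 * Example usage (sketch; names hypothetical): mapping a file from
 * kernel code on behalf of the current process. Errors come back
 * encoded in the returned address:
 *
 *      unsigned long addr;
 *
 *      addr = vm_mmap(filp, 0, size, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, 0);
 *      if (IS_ERR_VALUE(addr))
 *              return (long)addr;
 */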
/**
 * kvmalloc_node - attempt to allocate physically contiguous memory, but upon
 * failure, fall back to non-contiguous (vmalloc) allocation.
 * @size: size of the request.
 * @flags: gfp mask for the allocation - must be compatible (superset) with GFP_KERNEL.
 * @node: numa node to allocate from
 *
 * Uses kmalloc to get the memory but if the allocation fails then falls back
 * to the vmalloc allocator. Use kvfree for freeing the memory.
 *
 * Reclaim modifiers - __GFP_NORETRY and __GFP_NOFAIL are not supported.
 * __GFP_RETRY_MAYFAIL is supported, and it should be used only if kmalloc is
 * preferable to the vmalloc fallback, due to visible performance drawbacks.
 *
 * Please note that the vmalloc fallback is used only when @flags are
 * compatible with GFP_KERNEL; any other gfp usage goes straight to kmalloc.
 */
void *kvmalloc_node(size_t size, gfp_t flags, int node)
{
        gfp_t kmalloc_flags = flags;
        void *ret;

        /*
         * vmalloc uses GFP_KERNEL for some internal allocations (e.g page tables)
         * so the given set of flags has to be compatible.
         */
        if ((flags & GFP_KERNEL) != GFP_KERNEL)
                return kmalloc_node(size, flags, node);

        /*
         * We want to attempt a large physically contiguous block first because
         * it is less likely to fragment multiple larger blocks and therefore
         * contribute to a long term fragmentation less than vmalloc fallback.
         * However make sure that larger requests are not too disruptive - no
         * OOM killer and no allocation failure warnings as we have a fallback.
         */
        if (size > PAGE_SIZE) {
                kmalloc_flags |= __GFP_NOWARN;

                if (!(kmalloc_flags & __GFP_RETRY_MAYFAIL))
                        kmalloc_flags |= __GFP_NORETRY;
        }

        ret = kmalloc_node(size, kmalloc_flags, node);

        /*
         * It doesn't really make sense to fall back to vmalloc for sub-page
         * requests.
         */
        if (ret || size <= PAGE_SIZE)
                return ret;

        return __vmalloc_node_flags_caller(size, node, flags,
                        __builtin_return_address(0));
}
EXPORT_SYMBOL(kvmalloc_node);
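
/*
 * Example usage (sketch; struct my_ent is hypothetical): a table whose
 * size depends on untrusted input, where a large request should
 * transparently fall back to vmalloc. kvmalloc() is the NUMA_NO_NODE
 * wrapper around kvmalloc_node(); pair it with kvfree() below:
 *
 *      struct my_ent *tbl = kvmalloc(nr * sizeof(*tbl), GFP_KERNEL);
 *
 *      if (!tbl)
 *              return -ENOMEM;
 *      ...
 *      kvfree(tbl);
 */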
/**
 * kvfree() - Free memory.
 * @addr: Pointer to allocated memory.
 *
 * kvfree frees memory allocated by any of vmalloc(), kmalloc() or kvmalloc().
 * It is slightly more efficient to use kfree() or vfree() if you are certain
 * that you know which one to use.
 *
 * Context: Any context except NMI.
 */
void kvfree(const void *addr)
{
        if (is_vmalloc_addr(addr))
                vfree(addr);
        else
                kfree(addr);
}
EXPORT_SYMBOL(kvfree);
static inline void *__page_rmapping(struct page *page)
{
        unsigned long mapping;

        mapping = (unsigned long)page->mapping;
        mapping &= ~PAGE_MAPPING_FLAGS;

        return (void *)mapping;
}

/* Neutral page->mapping pointer to address_space or anon_vma or other */
void *page_rmapping(struct page *page)
{
        page = compound_head(page);
        return __page_rmapping(page);
}
/*
 * Return true if this page is mapped into pagetables.
 * For compound page it returns true if any subpage of compound page is mapped.
 */
bool page_mapped(struct page *page)
{
        int i;

        if (likely(!PageCompound(page)))
                return atomic_read(&page->_mapcount) >= 0;
        page = compound_head(page);
        if (atomic_read(compound_mapcount_ptr(page)) >= 0)
                return true;
        if (PageHuge(page))
                return false;
        for (i = 0; i < (1 << compound_order(page)); i++) {
                if (atomic_read(&page[i]._mapcount) >= 0)
                        return true;
        }
        return false;
}
EXPORT_SYMBOL(page_mapped);
struct anon_vma *page_anon_vma(struct page *page)
{
        unsigned long mapping;

        page = compound_head(page);
        mapping = (unsigned long)page->mapping;
        if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                return NULL;
        return __page_rmapping(page);
}
struct address_space *page_mapping(struct page *page)
{
        struct address_space *mapping;

        page = compound_head(page);

        /* This happens if someone calls flush_dcache_page on slab page */
        if (unlikely(PageSlab(page)))
                return NULL;

        if (unlikely(PageSwapCache(page))) {
                swp_entry_t entry;

                entry.val = page_private(page);
                return swap_address_space(entry);
        }

        mapping = page->mapping;
        if ((unsigned long)mapping & PAGE_MAPPING_ANON)
                return NULL;

        return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
EXPORT_SYMBOL(page_mapping);
/*
 * For file cache pages, return the address_space, otherwise return NULL
 */
struct address_space *page_mapping_file(struct page *page)
{
        if (unlikely(PageSwapCache(page)))
                return NULL;
        return page_mapping(page);
}
/* Slow path of page_mapcount() for compound pages */
int __page_mapcount(struct page *page)
{
        int ret;

        ret = atomic_read(&page->_mapcount) + 1;
        /*
         * For file THP, page->_mapcount contains the total number of
         * mappings of the page: no need to look into compound_mapcount.
         */
        if (!PageAnon(page) && !PageHuge(page))
                return ret;
        page = compound_head(page);
        ret += atomic_read(compound_mapcount_ptr(page)) + 1;
        if (PageDoubleMap(page))
                ret--;
        return ret;
}
EXPORT_SYMBOL_GPL(__page_mapcount);
int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS;
int sysctl_overcommit_ratio __read_mostly = 50;
unsigned long sysctl_overcommit_kbytes __read_mostly;
int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
int overcommit_ratio_handler(struct ctl_table *table, int write,
                             void __user *buffer, size_t *lenp,
                             loff_t *ppos)
{
        int ret;

        ret = proc_dointvec(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_kbytes = 0;
        return ret;
}

int overcommit_kbytes_handler(struct ctl_table *table, int write,
                              void __user *buffer, size_t *lenp,
                              loff_t *ppos)
{
        int ret;

        ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
        if (ret == 0 && write)
                sysctl_overcommit_ratio = 0;
        return ret;
}
/*
 * Committed memory limit enforced when OVERCOMMIT_NEVER policy is used
 */
unsigned long vm_commit_limit(void)
{
        unsigned long allowed;

        if (sysctl_overcommit_kbytes)
                allowed = sysctl_overcommit_kbytes >> (PAGE_SHIFT - 10);
        else
                allowed = ((totalram_pages - hugetlb_total_pages())
                           * sysctl_overcommit_ratio / 100);
        allowed += total_swap_pages;

        return allowed;
}
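
/*
 * Worked example (assumed numbers): with sysctl_overcommit_kbytes == 0,
 * overcommit_ratio == 50, 8 GiB of RAM (2097152 4 KiB pages, no
 * hugepages) and 2 GiB of swap (524288 pages), the limit is
 * 2097152 * 50 / 100 + 524288 = 1572864 pages, i.e. 6 GiB of
 * committable address space.
 */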
/*
 * Make sure vm_committed_as is in one cacheline and is not shared with
 * other variables. It can be updated by several CPUs frequently.
 */
struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;

/*
 * The global memory commitment made in the system can be a metric
 * that can be used to drive ballooning decisions when Linux is hosted
 * as a guest. On Hyper-V, the host implements a policy engine for dynamically
 * balancing memory across competing virtual machines that are hosted.
 * Several metrics drive this policy engine including the guest reported
 * memory commitment.
 */
unsigned long vm_memory_committed(void)
{
        return percpu_counter_read_positive(&vm_committed_as);
}
EXPORT_SYMBOL_GPL(vm_memory_committed);
/*
 * Check that a process has enough memory to allocate a new virtual
 * mapping. 0 means there is enough memory for the allocation to
 * succeed and -ENOMEM implies there is not.
 *
 * We currently support three overcommit policies, which are set via the
 * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting.rst
 *
 * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code 2002 Jul 20 by Robert Love.
 *
 * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
 *
 * Note this is a helper function intended to be used by LSMs which
 * wish to use this logic.
 */
int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
{
        long free, allowed, reserve;

        VM_WARN_ONCE(percpu_counter_read(&vm_committed_as) <
                     -(s64)vm_committed_as_batch * num_online_cpus(),
                     "memory commitment underflow");

        vm_acct_memory(pages);

        /*
         * Sometimes we want to use more memory than we have
         */
        if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
                return 0;

        if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
                free = global_zone_page_state(NR_FREE_PAGES);
                free += global_node_page_state(NR_FILE_PAGES);

                /*
                 * shmem pages shouldn't be counted as free in this
                 * case, they can't be purged, only swapped out, and
                 * that won't affect the overall amount of available
                 * memory in the system.
                 */
                free -= global_node_page_state(NR_SHMEM);

                free += get_nr_swap_pages();

                /*
                 * Any slabs which are created with the
                 * SLAB_RECLAIM_ACCOUNT flag claim to have contents
                 * which are reclaimable, under pressure. The dentry
                 * cache and most inode caches should fall into this
                 * category.
                 */
                free += global_node_page_state(NR_SLAB_RECLAIMABLE);

                /*
                 * Part of the kernel memory, which can be released
                 * under memory pressure.
                 */
                free += global_node_page_state(
                        NR_INDIRECTLY_RECLAIMABLE_BYTES) >> PAGE_SHIFT;

                /*
                 * Leave out the reserved pages; they are not available
                 * for anonymous mappings.
                 */
                if (free <= totalreserve_pages)
                        goto error;
                else
                        free -= totalreserve_pages;

                /*
                 * Reserve some for root
                 */
                if (!cap_sys_admin)
                        free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

                if (free > pages)
                        return 0;

                goto error;
        }

        allowed = vm_commit_limit();
        /*
         * Reserve some for root
         */
        if (!cap_sys_admin)
                allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);

        /*
         * Don't let a single process grow so big a user can't recover
         */
        if (mm) {
                reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
                allowed -= min_t(long, mm->total_vm / 32, reserve);
        }

        if (percpu_counter_read_positive(&vm_committed_as) < allowed)
                return 0;
error:
        vm_unacct_memory(pages);

        return -ENOMEM;
}
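
/*
 * Example usage (sketch of how an LSM might wire up this helper; the
 * function name is hypothetical, but the pattern mirrors existing
 * security modules):
 *
 *      static int my_vm_enough_memory(struct mm_struct *mm, long pages)
 *      {
 *              int cap_sys_admin = 0;
 *
 *              if (capable(CAP_SYS_ADMIN))
 *                      cap_sys_admin = 1;
 *              return __vm_enough_memory(mm, pages, cap_sys_admin);
 *      }
 */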
/**
 * get_cmdline() - copy the cmdline value to a buffer.
 * @task:   the task whose cmdline value to copy.
 * @buffer: the buffer to copy to.
 * @buflen: the length of the buffer. Larger cmdline values are truncated
 *          to this length.
 *
 * Returns the size of the cmdline field copied. Note that the copy does
 * not guarantee a trailing NUL byte.
 */
int get_cmdline(struct task_struct *task, char *buffer, int buflen)
{
        int res = 0;
        unsigned int len;
        struct mm_struct *mm = get_task_mm(task);
        unsigned long arg_start, arg_end, env_start, env_end;

        if (!mm)
                goto out;
        if (!mm->arg_end)
                goto out_mm;    /* Shh! No looking before we're done */

        down_read(&mm->mmap_sem);
        arg_start = mm->arg_start;
        arg_end = mm->arg_end;
        env_start = mm->env_start;
        env_end = mm->env_end;
        up_read(&mm->mmap_sem);

        len = arg_end - arg_start;

        if (len > buflen)
                len = buflen;

        res = access_process_vm(task, arg_start, buffer, len, FOLL_FORCE);

        /*
         * If the NUL at the end of args has been overwritten, then
         * assume application is using setproctitle(3).
         */
        if (res > 0 && buffer[res-1] != '\0' && len < buflen) {
                len = strnlen(buffer, res);
                if (len < res) {
                        res = len;
                } else {
                        len = env_end - env_start;
                        if (len > buflen - res)
                                len = buflen - res;
                        res += access_process_vm(task, env_start,
                                                 buffer+res, len,
                                                 FOLL_FORCE);
                        res = strnlen(buffer, res);
                }
        }
out_mm:
        mmput(mm);
out:
        return res;
}
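
/*
 * Example usage (sketch; the buffer size is arbitrary): fetching
 * another task's command line, much as /proc/<pid>/cmdline does. The
 * result is not guaranteed to be NUL-terminated, so treat it as an
 * n-byte blob whose arguments are separated by embedded NULs:
 *
 *      char buf[256];
 *      int n = get_cmdline(task, buf, sizeof(buf));
 */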