	entry = pte_to_swp_entry(orig_pte);
	if (is_migration_entry(entry)) {
		migration_entry_wait(mm, pmd, address);
		goto out;
	}
	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
	page = lookup_swap_cache(entry);
	if (!page) {
		grab_swap_token(); /* Contend for token _before_ read-in */
		page = swapin_readahead(entry,
					GFP_HIGHUSER_MOVABLE, vma, address);
		if (!page) {
			/*
			 * Back out if somebody else faulted in this pte
			 * while we released the pte lock.
			 */
			page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
			if (likely(pte_same(*page_table, orig_pte)))
				ret = VM_FAULT_OOM;
			delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
			goto unlock;
		}

		/* Had to read the page from swap area: Major fault */
		ret = VM_FAULT_MAJOR;
		count_vm_event(PGMAJFAULT);
	}

	mark_page_accessed(page);
	lock_page(page);
	delayacct_clear_flag(DELAYACCT_PF_SWAPIN);

	/*
	 * Back out if somebody else already faulted in this pte.
	 */
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (unlikely(!pte_same(*page_table, orig_pte)))
		goto out_nomap;

	if (unlikely(!PageUptodate(page))) {
		ret = VM_FAULT_SIGBUS;
		goto out_nomap;
	}

	/* The page isn't present yet, go ahead with the fault. */

	inc_mm_counter(mm, anon_rss);
	pte = mk_pte(page, vma->vm_page_prot);
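	/*
	 * Write fault on a swap page we are the only user of: make the
	 * mapping writable and dirty right away, and clear write_access
	 * so we do not fall through to do_wp_page() below.
	 */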
	if (write_access && can_share_swap_page(page)) {
		pte = maybe_mkwrite(pte_mkdirty(pte), vma);
		write_access = 0;
	}

	flush_icache_page(vma, page);
	set_pte_at(mm, address, page_table, pte);
	page_add_anon_rmap(page, vma, address);

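	/*
	 * Release the swap slot; if swap is getting full, also try to drop
	 * the swap-cache copy so the slot can be reused sooner.
	 */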
	swap_free(entry);
	if (vm_swap_full())
		remove_exclusive_swap_page(page);
	unlock_page(page);

	if (write_access) {
		/* XXX: We could OR the do_wp_page code with this one? */
		if (do_wp_page(mm, vma, address,
				page_table, pmd, ptl, pte) & VM_FAULT_OOM)
			ret = VM_FAULT_OOM;
		goto out;
	}

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, pte);
unlock:
	pte_unmap_unlock(page_table, ptl);
out:
	return ret;
out_nomap:
	pte_unmap_unlock(page_table, ptl);
	unlock_page(page);
	page_cache_release(page);
	return ret;
}

/*
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access)
{
	struct page *page;
	spinlock_t *ptl;
	pte_t entry;

	/* Allocate our own private page. */
	pte_unmap(page_table);
	if (unlikely(anon_vma_prepare(vma)))
		goto oom;
	page = alloc_zeroed_user_highpage_movable(vma, address);
	if (!page)
		goto oom;
	entry = mk_pte(page, vma->vm_page_prot);
	entry = maybe_mkwrite(pte_mkdirty(entry), vma);

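	/*
	 * Retake the pte lock and make sure nobody else installed a pte
	 * here while we were allocating; if they did, back out and free
	 * our page.
	 */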
	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
	if (!pte_none(*page_table))
		goto release;
	inc_mm_counter(mm, anon_rss);
	lru_cache_add_active(page);
	page_add_new_anon_rmap(page, vma, address);
	set_pte_at(mm, address, page_table, entry);

	/* No need to invalidate - it was non-present before */
	update_mmu_cache(vma, address, entry);
unlock:
	pte_unmap_unlock(page_table, ptl);
	return 0;
release:
	page_cache_release(page);
	goto unlock;
oom:
	return VM_FAULT_OOM;
}

/*
 * __do_fault() tries to create a new page mapping. It aggressively
 * tries to share with existing pages, but makes a separate copy if
 * the FAULT_FLAG_WRITE is set in the flags parameter in order to avoid
 * the next page fault.
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte neither mapped nor locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pmd_t *pmd,
		pgoff_t pgoff, unsigned int flags, pte_t orig_pte)
{
	pte_t *page_table;
	spinlock_t *ptl;
	struct page *page;
	pte_t entry;
	int anon = 0;
	struct page *dirty_page = NULL;
	struct vm_fault vmf;
	int ret;
	int page_mkwrite = 0;
	vmf.virtual_address = (void __user *)(address & PAGE_MASK);
	vmf.pgoff = pgoff;
	vmf.flags = flags;
	vmf.page = NULL;

	BUG_ON(vma->vm_flags & VM_PFNMAP);

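	/*
	 * Use the ->fault handler when the driver provides one; otherwise
	 * fall back to the legacy ->nopage interface.
	 */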
	if (likely(vma->vm_ops->fault)) {
		ret = vma->vm_ops->fault(vma, &vmf);
		if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
			return ret;
	} else {
		/* Legacy ->nopage path */
		ret = 0;
		vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
		/* no page was available -- either SIGBUS or OOM */
		if (unlikely(vmf.page == NOPAGE_SIGBUS))
			return VM_FAULT_SIGBUS;
		else if (unlikely(vmf.page == NOPAGE_OOM))
			return VM_FAULT_OOM;
	}

	/*
	 * For consistency in subsequent calls, make the faulted page always
	 * locked.
	 */
	if (unlikely(!(ret & VM_FAULT_LOCKED)))
		lock_page(vmf.page);
	else
		VM_BUG_ON(!PageLocked(vmf.page));

	/*
	 * Should we do an early C-O-W break?
	 */
	page = vmf.page;
	if (flags & FAULT_FLAG_WRITE) {
		if (!(vma->vm_flags & VM_SHARED)) {
			anon = 1;
			if (unlikely(anon_vma_prepare(vma))) {
				ret = VM_FAULT_OOM;
				goto out;
			}
			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
						vma, address);
			if (!page) {
				ret = VM_FAULT_OOM;
				goto out;
			}
			copy_user_highpage(page, vmf.page, address, vma);
		} else {
			/*
			 * If the page will be shareable, see if the backing
			 * address space wants to know that the page is about
			 * to become writable
			 */
			if (vma->vm_ops->page_mkwrite) {
				unlock_page(page);
				if (vma->vm_ops->page_mkwrite(vma, page) < 0) {
					ret = VM_FAULT_SIGBUS;
					anon = 1; /* no anon but release vmf.page */
					goto out_unlocked;
				}
				lock_page(page);
				/*
				 * XXX: this is not quite right (racy vs
				 * invalidate) to unlock and relock the page
				 * like this, however a better fix requires
				 * reworking page_mkwrite locking API, which
				 * is better done later.
				 */
				if (!page->mapping) {
					ret = 0;
					anon = 1; /* no anon but release vmf.page */
					goto out;
				}
				page_mkwrite = 1;
			}
		}

	}

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/*
	 * This silly early PAGE_DIRTY setting removes a race
	 * due to the bad i386 page protection. But it's valid
	 * for other architectures too.
	 *
	 * Note that if write_access is true, we either now have
	 * an exclusive copy of the page, or this is a shared mapping,
	 * so we can make it writable and dirty to avoid having to
	 * handle that later.
	 */
	/* Only go through if we didn't race with anybody else... */
	if (likely(pte_same(*page_table, orig_pte))) {
		flush_icache_page(vma, page);
		entry = mk_pte(page, vma->vm_page_prot);
		if (flags & FAULT_FLAG_WRITE)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		set_pte_at(mm, address, page_table, entry);
		if (anon) {
			inc_mm_counter(mm, anon_rss);
			lru_cache_add_active(page);
			page_add_new_anon_rmap(page, vma, address);
		} else {
			inc_mm_counter(mm, file_rss);
			page_add_file_rmap(page);
			if (flags & FAULT_FLAG_WRITE) {
				dirty_page = page;
				get_page(dirty_page);
			}
		}

		/* no need to invalidate: a not-present page won't be cached */
		update_mmu_cache(vma, address, entry);
	} else {
		if (anon)
			page_cache_release(page);
		else
			anon = 1; /* no anon but release faulted_page */
	}

	pte_unmap_unlock(page_table, ptl);

out:
	unlock_page(vmf.page);
out_unlocked:
	if (anon)
		page_cache_release(vmf.page);
	else if (dirty_page) {
		if (vma->vm_file)
			file_update_time(vma->vm_file);

		set_page_dirty_balance(dirty_page, page_mkwrite);
		put_page(dirty_page);
	}

	return ret;
}

static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access, pte_t orig_pte)
{
	pgoff_t pgoff = (((address & PAGE_MASK)
			- vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	unsigned int flags = (write_access ? FAULT_FLAG_WRITE : 0);

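	/*
	 * A linear fault derives the file offset directly from the faulting
	 * address; __do_fault() does the real work.
	 */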
	pte_unmap(page_table);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * do_no_pfn() tries to create a new page mapping for a page without
 * a struct_page backing it
 *
 * As this is called only for pages that do not currently exist, we
 * do not need to flush old virtual caches or the TLB.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 *
 * It is expected that the ->nopfn handler always returns the same pfn
 * for a given virtual mapping.
 *
 * Mark this `noinline' to prevent it from bloating the main pagefault code.
 */
static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
		     unsigned long address, pte_t *page_table, pmd_t *pmd,
		     int write_access)
{
	spinlock_t *ptl;
	pte_t entry;
	unsigned long pfn;

	pte_unmap(page_table);
	BUG_ON(!(vma->vm_flags & VM_PFNMAP));
	BUG_ON(is_cow_mapping(vma->vm_flags));

	pfn = vma->vm_ops->nopfn(vma, address & PAGE_MASK);
	if (unlikely(pfn == NOPFN_OOM))
		return VM_FAULT_OOM;
	else if (unlikely(pfn == NOPFN_SIGBUS))
		return VM_FAULT_SIGBUS;
	else if (unlikely(pfn == NOPFN_REFAULT))
		return 0;

	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Only go through if we didn't race with anybody else... */
	if (pte_none(*page_table)) {
		entry = pfn_pte(pfn, vma->vm_page_prot);
		if (write_access)
			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
		set_pte_at(mm, address, page_table, entry);
	}
	pte_unmap_unlock(page_table, ptl);
	return 0;
}

/*
 * Fault of a previously existing named mapping. Repopulate the pte
 * from the encoded file_pte if possible. This enables swappable
 * nonlinear vmas.
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, pte_t *page_table, pmd_t *pmd,
		int write_access, pte_t orig_pte)
{
	unsigned int flags = FAULT_FLAG_NONLINEAR |
				(write_access ? FAULT_FLAG_WRITE : 0);
	pgoff_t pgoff;

	if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
		return 0;

	if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
			!(vma->vm_flags & VM_CAN_NONLINEAR))) {
		/*
		 * Page table corrupted: show pte and kill process.
		 */
		print_bad_pte(vma, orig_pte, address);
		return VM_FAULT_OOM;
	}

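	/* For a nonlinear vma the file offset is encoded in the pte itself. */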
	pgoff = pte_to_pgoff(orig_pte);
	return __do_fault(mm, vma, address, pmd, pgoff, flags, orig_pte);
}

/*
 * These routines also need to handle stuff like marking pages dirty
 * and/or accessed for architectures that don't do it in hardware (most
 * RISC architectures).  The early dirtying is also good on the i386.
 *
 * There is also a hook called "update_mmu_cache()" that architectures
 * with external mmu caches can use to update those (ie the Sparc or
 * PowerPC hashed page tables that act as extended TLBs).
 *
 * We enter with non-exclusive mmap_sem (to exclude vma changes,
 * but allow concurrent faults), and pte mapped but not yet locked.
 * We return with mmap_sem still held, but pte unmapped and unlocked.
 */
static inline int handle_pte_fault(struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long address,
		pte_t *pte, pmd_t *pmd, int write_access)
{
	pte_t entry;
	spinlock_t *ptl;

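	/*
	 * Snapshot the pte without the lock: the not-present cases below
	 * take the pte lock themselves, and the present-pte path revalidates
	 * the entry under the lock before using it.
	 */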
	entry = *pte;
	if (!pte_present(entry)) {
		if (pte_none(entry)) {
			if (vma->vm_ops) {
				if (vma->vm_ops->fault || vma->vm_ops->nopage)
					return do_linear_fault(mm, vma, address,
						pte, pmd, write_access, entry);
				if (unlikely(vma->vm_ops->nopfn))
					return do_no_pfn(mm, vma, address, pte,
							 pmd, write_access);
			}
			return do_anonymous_page(mm, vma, address,
						 pte, pmd, write_access);
		}
		if (pte_file(entry))
			return do_nonlinear_fault(mm, vma, address,
					pte, pmd, write_access, entry);
		return do_swap_page(mm, vma, address,
					pte, pmd, write_access, entry);
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (unlikely(!pte_same(*pte, entry)))
		goto unlock;
	if (write_access) {
		if (!pte_write(entry))
			return do_wp_page(mm, vma, address,
					pte, pmd, ptl, entry);
		entry = pte_mkdirty(entry);
	}
	entry = pte_mkyoung(entry);
	if (ptep_set_access_flags(vma, address, pte, entry, write_access)) {
		update_mmu_cache(vma, address, entry);
	} else {
		/*
		 * This is needed only for protection faults but the arch code
		 * is not yet telling us if this is a protection fault or not.
		 * This still avoids useless tlb flushes for .text page faults
		 * with threads.
		 */
		if (write_access)
			flush_tlb_page(vma, address);
	}
unlock:
	pte_unmap_unlock(pte, ptl);
	return 0;
}

/*
 * By the time we get here, we already hold the mm semaphore
 */
int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
		unsigned long address, int write_access)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	__set_current_state(TASK_RUNNING);

	count_vm_event(PGFAULT);

	if (unlikely(is_vm_hugetlb_page(vma)))
		return hugetlb_fault(mm, vma, address, write_access);

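	/*
	 * Walk the page-table hierarchy, allocating any missing intermediate
	 * levels; failure at any level is reported as an OOM fault.
	 */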
	pgd = pgd_offset(mm, address);
	pud = pud_alloc(mm, pgd, address);
	if (!pud)
		return VM_FAULT_OOM;
	pmd = pmd_alloc(mm, pud, address);
	if (!pmd)
		return VM_FAULT_OOM;
	pte = pte_alloc_map(mm, pmd, address);
	if (!pte)
		return VM_FAULT_OOM;

	return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
}

#ifndef __PAGETABLE_PUD_FOLDED
/*
 * Allocate page upper directory.
 * We've already handled the fast-path in-line.
 */
int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* Another has populated it */
		pud_free(mm, new);
	else
		pgd_populate(mm, pgd, new);
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PUD_FOLDED */

#ifndef __PAGETABLE_PMD_FOLDED
/*
 * Allocate page middle directory.
 * We've already handled the fast-path in-line.
 */
int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
{
	pmd_t *new = pmd_alloc_one(mm, address);
	if (!new)
		return -ENOMEM;

	spin_lock(&mm->page_table_lock);
#ifndef __ARCH_HAS_4LEVEL_HACK
	if (pud_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pud_populate(mm, pud, new);
#else
	if (pgd_present(*pud))		/* Another has populated it */
		pmd_free(mm, new);
	else
		pgd_populate(mm, pud, new);
#endif /* __ARCH_HAS_4LEVEL_HACK */
	spin_unlock(&mm->page_table_lock);
	return 0;
}
#endif /* __PAGETABLE_PMD_FOLDED */

int make_pages_present(unsigned long addr, unsigned long end)
{
	int ret, len, write;
	struct vm_area_struct * vma;

	vma = find_vma(current->mm, addr);
	if (!vma)
		return -1;
	write = (vma->vm_flags & VM_WRITE) != 0;
	BUG_ON(addr >= end);
	BUG_ON(end > vma->vm_end);
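	/* Number of pages spanned by [addr, end). */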
	len = DIV_ROUND_UP(end, PAGE_SIZE) - addr/PAGE_SIZE;
	ret = get_user_pages(current, current->mm, addr,
			len, write, 0, NULL, NULL);
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -1;
}

#if !defined(__HAVE_ARCH_GATE_AREA)

#if defined(AT_SYSINFO_EHDR)
static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;
	/*
	 * Make sure the vDSO gets into every core dump.
	 * Dumping its contents makes post-mortem fully interpretable later
	 * without matching up the same kernel and hardware config to see
	 * what PC values meant.
	 */
	gate_vma.vm_flags |= VM_ALWAYSDUMP;
	return 0;
}
__initcall(gate_vma_init);
#endif

struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
{
#ifdef AT_SYSINFO_EHDR
	return &gate_vma;
#else
	return NULL;
#endif
}

int in_gate_area_no_task(unsigned long addr)
{
#ifdef AT_SYSINFO_EHDR
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
#endif
	return 0;
}

#endif	/* __HAVE_ARCH_GATE_AREA */

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

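		/*
		 * Copy through a temporary kernel mapping of the page;
		 * copy_to_user_page()/copy_from_user_page() keep the caches
		 * coherent on architectures that need explicit flushing.
		 */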
		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}

/*
 * Print the name of a VMA.
 */
void print_vma_addr(char *prefix, unsigned long ip)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ip);
	if (vma && vma->vm_file) {
		struct file *f = vma->vm_file;
		char *buf = (char *)__get_free_page(GFP_KERNEL);
		if (buf) {
			char *p, *s;

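			/* Print just the final path component of the file. */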
			p = d_path(f->f_dentry, f->f_vfsmnt, buf, PAGE_SIZE);
			if (IS_ERR(p))
				p = "?";
			s = strrchr(p, '/');
			if (s)
				p = s+1;
			printk("%s%s[%lx+%lx]", prefix, p,
					vma->vm_start,
					vma->vm_end - vma->vm_start);
			free_page((unsigned long)buf);
		}
	}
	up_read(&current->mm->mmap_sem);
}