Abstract out the pte setting loop from do_anonymous_page().

diff -prauN pgcl-2.5.70-bk13-3/mm/memory.c pgcl-2.5.70-bk13-4/mm/memory.c
--- pgcl-2.5.70-bk13-3/mm/memory.c	2003-06-08 09:24:21.000000000 -0700
+++ pgcl-2.5.70-bk13-4/mm/memory.c	2003-06-08 09:42:15.000000000 -0700
@@ -1074,6 +1074,54 @@ static pte_t *fill_anonymizable_ptevec(s
 
 	return page_table;
 }
+
+static int set_anon_ptes(struct vm_area_struct *vma, struct page *page,
+				unsigned long addr, pte_t *ptes[],
+				struct pte_chain *pte_chain, int write_access)
+{
+	unsigned long pfn, subpfn;
+	int rss;
+
+	pfn = page_to_pfn(page);
+	rss = 0;
+	for (subpfn = 0; subpfn < PAGE_MMUCOUNT; ++subpfn) {
+		pte_t pte;
+
+		pr_debug("subpfn = 0x%lx, ptep = 0x%p\n", subpfn, ptes[subpfn]);
+
+		if (!ptes[subpfn]) {
+			pr_debug("pte empty\n");
+			continue;
+		} else if (!pte_none(*ptes[subpfn])) {
+			pr_debug("pte non-none\n");
+			continue;
+		}
+
+		pte = pfn_pte(pfn + subpfn, vma->vm_page_prot);
+		if (!write_access) {
+			pr_debug("setting pte to zero page\n");
+			set_pte(ptes[subpfn], pte_wrprotect(pte));
+		} else {
+			pr_debug("setting pte to newly zeroed anonymous page\n");
+			BUG_ON(!pte_chain);
+			pr_debug("setting pte for anonymous zeroed page\n");
+			pr_debug("ptep = 0x%p, pte = 0x%Lx\n",
+				 ptes[subpfn], (u64)pte_val(pte));
+			set_pte(ptes[subpfn], pte_mkwrite(pte_mkdirty(pte)));
+			pr_debug("about to page_add_rmap()\n");
+			pte_chain = page_add_rmap_chained(page, ptes[subpfn],
+							  pte_chain);
+			pr_debug("about to update_mmu_cache()\n");
+			update_mmu_cache(vma, addr, pte);
+			rss++;
+			pr_debug("about to page_cache_get()\n");
+			page_cache_get(page);
+		}
+		pr_debug("falling through to next subpfn\n");
+	}
+	return rss;
+}
+
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
@@ -1100,6 +1148,7 @@ static int do_wp_page(struct mm_struct *
 	struct page *old_page, *new_page;
 	unsigned long pfn = pte_pfn(pte);
 	struct pte_chain *pte_chain = NULL;
+	pte_t *ptes[PAGE_MMUCOUNT] = { [0 ... PAGE_MMUCOUNT-1] = NULL };
 	int ret;
 
 	if (unlikely(!pfn_valid(pfn))) {
@@ -1457,10 +1506,7 @@ do_anonymous_page(struct mm_struct *mm,
 {
 	struct page *page = NULL;
 	struct pte_chain *pte_chain = NULL;
-	unsigned long up_vaddr, dn_vaddr, lo_vaddr, hi_vaddr;
-	unsigned long pfn, subpfn, dn_subpfn, up_subpfn;
 	pte_t *ptes[PAGE_MMUCOUNT] = { [0 ... PAGE_MMUCOUNT-1] = NULL };
-	pte_t *up_pte, *dn_pte;
 	int rss, ret = VM_FAULT_MINOR;
 
 	if (write_access)
@@ -1493,44 +1539,7 @@ do_anonymous_page(struct mm_struct *mm,
 	spin_lock(&mm->page_table_lock);
 	page_table = fill_anonymizable_ptevec(vma, pmd, page_table, ptes,
 						addr, write_access);
-
-	pfn = page_to_pfn(page);
-	rss = 0;
-	for (subpfn = 0; subpfn < PAGE_MMUCOUNT; ++subpfn) {
-		pte_t pte;
-
-		pr_debug("subpfn = 0x%lx, ptep = 0x%p\n", subpfn, ptes[subpfn]);
-
-		if (!ptes[subpfn]) {
-			pr_debug("pte empty\n");
-			continue;
-		} else if (!pte_none(*ptes[subpfn])) {
-			pr_debug("pte non-none\n");
-			continue;
-		}
-
-		pte = pfn_pte(pfn + subpfn, vma->vm_page_prot);
-		if (!write_access) {
-			pr_debug("setting pte to zero page\n");
-			set_pte(ptes[subpfn], pte_wrprotect(pte));
-		} else {
-			pr_debug("setting pte to newly zeroed anonymous page\n");
-			BUG_ON(!pte_chain);
-			pr_debug("setting pte for anonymous zeroed page\n");
-			pr_debug("ptep = 0x%p, pte = 0x%Lx\n",
-				 ptes[subpfn], (u64)pte_val(pte));
-			set_pte(ptes[subpfn], pte_mkwrite(pte_mkdirty(pte)));
-			pr_debug("about to page_add_rmap()\n");
-			pte_chain = page_add_rmap_chained(page, ptes[subpfn],
-							  pte_chain);
-			pr_debug("about to update_mmu_cache()\n");
-			update_mmu_cache(vma, addr, pte);
-			rss++;
-			pr_debug("about to page_cache_get()\n");
-			page_cache_get(page);
-		}
-		pr_debug("falling through to next subpfn\n");
-	}
+	rss = set_anon_ptes(vma, page, addr, ptes, pte_chain, write_access);
 	pr_debug("doing pte_unmap(0x%p)\n", page_table);
 	pte_unmap(page_table);
 	pr_debug("adding %d to rss\n", rss);
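
As a sanity check on the extracted loop's skip/fill behaviour, here is a
minimal userspace sketch of set_anon_ptes()'s control flow. It is
illustration only, not kernel code: PAGE_MMUCOUNT is pinned to 4, pte_t is
faked as a plain unsigned long, and set_pte()/pte_none() are trivial
stand-ins, so the zero-page, rmap, and refcounting work of the real helper
is elided.

/*
 * Userspace mock of set_anon_ptes()'s skip/fill logic. Only the
 * NULL-slot and non-none-pte skips and the rss accounting for the
 * write_access case are modeled; everything else is elided.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_MMUCOUNT 4

typedef unsigned long pte_t;

static int pte_none(pte_t pte) { return pte == 0; }
static void set_pte(pte_t *ptep, pte_t pte) { *ptep = pte; }

static int set_anon_ptes(unsigned long pfn, pte_t *ptes[], int write_access)
{
	unsigned long subpfn;
	int rss = 0;

	for (subpfn = 0; subpfn < PAGE_MMUCOUNT; ++subpfn) {
		if (!ptes[subpfn])		/* no pte mapped at this slot */
			continue;
		if (!pte_none(*ptes[subpfn]))	/* already populated */
			continue;
		set_pte(ptes[subpfn], pfn + subpfn);
		if (write_access)
			rss++;	/* only writable faults charge rss */
	}
	return rss;
}

int main(void)
{
	pte_t slots[PAGE_MMUCOUNT] = { 0, 99, 0, 0 };
	pte_t *ptes[PAGE_MMUCOUNT] = { [0 ... PAGE_MMUCOUNT-1] = NULL };

	ptes[0] = &slots[0];	/* pte_none: gets filled */
	ptes[1] = &slots[1];	/* non-none: skipped */
	ptes[3] = &slots[3];	/* pte_none: gets filled; ptes[2] stays NULL */

	/* prints "rss = 2": slots 0 and 3 were filled */
	printf("rss = %d\n", set_anon_ptes(0x1000, ptes, 1));
	return 0;
}

The { [0 ... PAGE_MMUCOUNT-1] = NULL } initializer is the same GNU range
designator the patch itself uses to start every pte slot out empty, so the
sketch builds with gcc as-is.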