--- pre-2.3.11-1_shm/ipc/shm.c.2	Tue Jul 13 14:38:00 1999
+++ pre-2.3.11-1_shm/ipc/shm.c	Tue Jul 13 14:42:05 1999
@@ -15,6 +15,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 
@@ -626,6 +627,8 @@
 	pte_t pte;
 	struct shmid_kernel *shp;
 	unsigned int id, idx;
+	unsigned long page;
+	struct page * page_map;
 
 	id = SWP_OFFSET(shmd->vm_pte) & SHM_ID_MASK;
 	idx = (address - shmd->vm_start + shmd->vm_offset) >> PAGE_SHIFT;
@@ -650,24 +653,34 @@
 #endif
 
 	lock_kernel();
+ again:
 	pte = __pte(shp->shm_pages[idx]);
 	if (!pte_present(pte)) {
-		unsigned long page = get_free_page(GFP_USER);
-		if (!page)
-			goto oom;
-		pte = __pte(shp->shm_pages[idx]);
-		if (pte_present(pte)) {
-			free_page (page); /* doesn't sleep */
-			goto done;
+		if (pte_none(pte))
+		{
+			page = get_free_page(GFP_USER);
+			if (!page)
+				goto oom;
+			if (pte_val(pte) != shp->shm_pages[idx])
+				goto changed;
 		}
-		if (!pte_none(pte)) {
-			rw_swap_page_nocache(READ, pte_val(pte), (char *)page);
-			pte = __pte(shp->shm_pages[idx]);
-			if (pte_present(pte)) {
-				free_page (page); /* doesn't sleep */
-				goto done;
+		else
+		{
+			unsigned long entry = pte_val(pte);
+
+			page_map = lookup_swap_cache(entry);
+			if (!page_map) {
+				swapin_readahead(entry);
+				page_map = read_swap_cache(entry);
 			}
-			swap_free(pte_val(pte));
+			pte = __pte(shp->shm_pages[idx]);
+			page = page_address(page_map);
+			if (pte_present(pte))
+				goto present;
+			if (!page_map)
+				goto oom;
+			delete_from_swap_cache(page_map);
+			swap_free(entry);
 			shm_swp--;
 		}
 		shm_rss++;
@@ -681,6 +694,14 @@
 	current->min_flt++;
 	get_page(mem_map + MAP_NR(pte_page(pte)));
 	return pte_page(pte);
+
+changed:
+	free_page(page);
+	goto again;
+present:
+	if (page_map)
+		free_page_and_swap_cache(page);
+	goto done;
 oom:
 	unlock_kernel();
 	return -1;
@@ -700,6 +721,7 @@
 	unsigned long id, idx;
 	int loop = 0;
 	int counter;
+	struct page * page_map;
 
 	counter = shm_rss >> prio;
 	if (!counter || !(swap_nr = get_swap_page()))
@@ -728,7 +750,8 @@
 	page = __pte(shp->shm_pages[idx]);
 	if (!pte_present(page))
 		goto check_table;
-	if ((gfp_mask & __GFP_DMA) && !PageDMA(&mem_map[MAP_NR(pte_page(page))]))
+	page_map = &mem_map[MAP_NR(pte_page(page))];
+	if ((gfp_mask & __GFP_DMA) && !PageDMA(page_map))
 		goto check_table;
 
 	swap_attempts++;
@@ -740,8 +763,11 @@
 	if (page_count(mem_map + MAP_NR(pte_page(page))) != 1)
 		goto check_table;
 	shp->shm_pages[idx] = swap_nr;
-	rw_swap_page_nocache (WRITE, swap_nr, (char *) pte_page(page));
-	free_page(pte_page(page));
+	swap_duplicate(swap_nr);
+	add_to_swap_cache(page_map, swap_nr);
+	rw_swap_page(WRITE, page_map, 0);
+
+	__free_page(page_map);
 	swap_successes++;
 	shm_swp++;
 	shm_rss--;
--- pre-2.3.11-1/include/linux/mm.h	Tue Jul 13 02:41:46 1999
+++ pre-2.3.11-1_shm/include/linux/mm.h	Tue Jul 13 02:47:33 1999
@@ -328,6 +328,7 @@
 extern void show_mem(void);
 extern void oom(struct task_struct * tsk);
 extern void si_meminfo(struct sysinfo * val);
+extern void swapin_readahead(unsigned long);
 
 /* mmap.c */
 extern void vma_init(void);
--- pre-2.3.11-1_shm/mm/memory.c~	Tue Jul 13 14:32:42 1999
+++ pre-2.3.11-1_shm/mm/memory.c	Tue Jul 13 14:40:03 1999
@@ -781,7 +781,7 @@
  * because it doesn't cost us any seek time. We also make sure to queue
  * the 'original' request together with the readahead ones...
  */
-static void swapin_readahead(unsigned long entry)
+void swapin_readahead(unsigned long entry)
 {
 	int i;
 	struct page *new_page;
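The key change in the fault path above is the retry discipline: get_free_page() (and the swap cache lookups) can sleep, so shp->shm_pages[idx] is re-read afterwards and the fault restarted from "again:" if another task changed the entry in the meantime. Below is a minimal user-space sketch of that pattern, for illustration only; every name in it (shm_pages, fetch_entry, simulate_blocking_alloc) is a hypothetical stand-in, not a kernel API.

/*
 * Sketch of the read-entry / do-blocking-work / re-check / retry
 * pattern the patch introduces in the shm fault handler.
 * All identifiers here are made up for the example.
 */
#include <stdio.h>
#include <stdlib.h>

static unsigned long shm_pages[1];	/* stand-in for the per-segment page table */

static unsigned long fetch_entry(int idx)
{
	return shm_pages[idx];		/* kernel code re-reads shp->shm_pages[idx] */
}

static void *simulate_blocking_alloc(void)
{
	/*
	 * get_free_page(GFP_USER) may sleep; while we slept, another
	 * task may have faulted the same page in and installed it.
	 */
	return malloc(4096);
}

int main(void)
{
	int idx = 0;
	unsigned long entry;
	void *page;

again:
	entry = fetch_entry(idx);
	if (entry == 0) {		/* pte_none(): never paged in */
		page = simulate_blocking_alloc();
		if (!page)
			return 1;	/* the patch jumps to "oom" here */
		if (fetch_entry(idx) != entry) {
			/*
			 * Someone raced with us while we slept: drop our
			 * page and restart, as the patch's "goto changed"
			 * / "goto again" pair does.
			 */
			free(page);
			goto again;
		}
		shm_pages[idx] = (unsigned long)page;
	}
	printf("entry installed: %#lx\n", shm_pages[idx]);
	return 0;
}

The same optimistic re-check shows up on the swap-in side of the patch: after read_swap_cache() returns, the entry is read again and a now-present pte routes to "present:", which releases the redundant page and swap cache reference instead of installing it twice.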