===== arch/i386/kernel/entry.S 1.89 vs edited =====
--- 1.89/arch/i386/kernel/entry.S	2005-01-08 06:44:02 +01:00
+++ edited/arch/i386/kernel/entry.S	2005-01-12 20:22:18 +01:00
@@ -864,5 +864,7 @@ ENTRY(sys_call_table)
 	.long sys_add_key
 	.long sys_request_key
 	.long sys_keyctl
+	.long sys_ioprio_set
+	.long sys_ioprio_get		/* 290 */
 
 syscall_table_size=(.-sys_call_table)
===== arch/ia64/kernel/entry.S 1.69 vs edited =====
--- 1.69/arch/ia64/kernel/entry.S	2004-11-23 21:18:30 +01:00
+++ edited/arch/ia64/kernel/entry.S	2005-01-12 20:22:18 +01:00
@@ -1531,8 +1531,8 @@ sys_call_table:
 	data8 sys_add_key
 	data8 sys_request_key
 	data8 sys_keyctl
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall			// 1275
+	data8 sys_ioprio_set
+	data8 sys_ioprio_get			// 1275
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
 	data8 sys_ni_syscall
===== arch/ppc/kernel/misc.S 1.64 vs edited =====
--- 1.64/arch/ppc/kernel/misc.S	2005-01-04 00:49:20 +01:00
+++ edited/arch/ppc/kernel/misc.S	2005-01-12 20:22:18 +01:00
@@ -1450,3 +1450,5 @@ _GLOBAL(sys_call_table)
 	.long sys_add_key
 	.long sys_request_key		/* 270 */
 	.long sys_keyctl
+	.long sys_ioprio_set
+	.long sys_ioprio_get
===== drivers/block/Kconfig.iosched 1.5 vs edited =====
--- 1.5/drivers/block/Kconfig.iosched	2004-10-20 08:40:45 +02:00
+++ edited/drivers/block/Kconfig.iosched	2005-01-12 20:22:18 +01:00
@@ -38,4 +38,17 @@ config IOSCHED_CFQ
 	  among all processes in the system. It should provide a fair working
 	  environment, suitable for desktop systems.
 
+config IOPRIO_WRITE
+	bool "Support for full write io fairness"
+	depends on IOSCHED_CFQ
+	---help---
+	  Most data writeout that isn't direct or raw happens asynchronously
+	  and thus not in the context of the process that originally dirtied
+	  the data. This type of writeback is typically handled by the pdflush
+	  kernel threads. The result is that a process dirtying a lot of pages
+	  gets proportionally more bandwidth than it should for writes.
+
+	  Say Y here if you want to apply full fairness for file system
+	  asynchronous writes at the expense of adding 4 bytes to struct page.
+ endmenu ===== drivers/block/as-iosched.c 1.42 vs edited ===== --- 1.42/drivers/block/as-iosched.c 2004-10-30 01:28:05 +02:00 +++ edited/drivers/block/as-iosched.c 2005-01-12 20:22:18 +01:00 @@ -1805,7 +1805,8 @@ static void as_put_request(request_queue rq->elevator_private = NULL; } -static int as_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +static int as_set_request(request_queue_t *q, struct request *rq, + struct bio *bio, int gfp_mask) { struct as_data *ad = q->elevator->elevator_data; struct as_rq *arq = mempool_alloc(ad->arq_pool, gfp_mask); @@ -1826,7 +1827,7 @@ static int as_set_request(request_queue_ return 1; } -static int as_may_queue(request_queue_t *q, int rw) +static int as_may_queue(request_queue_t *q, int rw, struct bio *bio) { int ret = ELV_MQUEUE_MAY; struct as_data *ad = q->elevator->elevator_data; ===== drivers/block/cfq-iosched.c 1.18 vs edited ===== --- 1.18/drivers/block/cfq-iosched.c 2005-01-11 10:03:17 +01:00 +++ edited/drivers/block/cfq-iosched.c 2005-01-12 20:22:18 +01:00 @@ -21,22 +21,32 @@ #include #include #include - -static unsigned long max_elapsed_crq; -static unsigned long max_elapsed_dispatch; +#include +#include /* * tunables */ static int cfq_quantum = 4; /* max queue in one round of service */ static int cfq_queued = 8; /* minimum rq allocate limit per-queue*/ -static int cfq_service = HZ; /* period over which service is avg */ static int cfq_fifo_expire_r = HZ / 2; /* fifo timeout for sync requests */ static int cfq_fifo_expire_w = 5 * HZ; /* fifo timeout for async requests */ static int cfq_fifo_rate = HZ / 8; /* fifo expiry rate */ static int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ static int cfq_back_penalty = 2; /* penalty of a backwards seek */ +static int cfq_slice_sync = HZ / 45; +static int cfq_slice_async = HZ / 100; +static int cfq_slice_async_rq = 2; +static int cfq_slice_idle = HZ / 100; + +#define CFQ_IDLE_GRACE (HZ / 10) + +/* + * disable queueing at the driver/hardware level + */ +static int cfq_max_depth = 1; + /* * for the hash of cfqq inside the cfqd */ @@ -55,6 +65,7 @@ static int cfq_back_penalty = 2; /* pena #define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash) #define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list) +#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist) #define RQ_DATA(rq) (rq)->elevator_private @@ -75,51 +86,78 @@ static int cfq_back_penalty = 2; /* pena #define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node) #define rq_rb_key(rq) (rq)->sector -/* - * threshold for switching off non-tag accounting - */ -#define CFQ_MAX_TAG (4) - -/* - * sort key types and names - */ -enum { - CFQ_KEY_PGID, - CFQ_KEY_TGID, - CFQ_KEY_UID, - CFQ_KEY_GID, - CFQ_KEY_LAST, -}; - -static char *cfq_key_types[] = { "pgid", "tgid", "uid", "gid", NULL }; - static kmem_cache_t *crq_pool; static kmem_cache_t *cfq_pool; static kmem_cache_t *cfq_ioc_pool; +static spinlock_t __cacheline_aligned_in_smp cfq_cic_lock = SPIN_LOCK_UNLOCKED; + +#define CFQ_PRIO_LISTS IOPRIO_BE_NR +#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE) +#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE) +#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT) + +#define CFQ_ASYNC (0) +#define CFQ_SYNC (1) + +/* + * Per block device queue structure + */ struct cfq_data { - struct list_head rr_list; + atomic_t ref; + request_queue_t *queue; + + /* + * rr list of queues with requests and the count of them + */ + 
struct list_head rr_list[CFQ_PRIO_LISTS]; + struct list_head busy_rr; + struct list_head cur_rr; + struct list_head idle_rr; + unsigned int busy_queues; + + /* + * non-ordered list of empty cfqq's + */ struct list_head empty_list; + /* + * cfqq lookup hash + */ struct hlist_head *cfq_hash; - struct hlist_head *crq_hash; - /* queues on rr_list (ie they have pending requests */ - unsigned int busy_queues; + /* + * global crq hash for all queues + */ + struct hlist_head *crq_hash; unsigned int max_queued; - atomic_t ref; + mempool_t *crq_pool; - int key_type; + int rq_in_driver; - mempool_t *crq_pool; + /* + * schedule slice state info + */ + /* + * idle window management + */ + struct timer_list idle_slice_timer; + struct work_struct unplug_work; + unsigned long idle_start; + + struct cfq_queue *active_queue; + struct cfq_io_context *active_cic; + int cur_prio, cur_end_prio, end_prio; + unsigned int dispatch_slice; - request_queue_t *queue; + struct timer_list idle_class_timer; sector_t last_sector; + unsigned long last_end_request; - int rq_in_driver; + unsigned int rq_starved; /* * tunables, see top of file @@ -131,28 +169,31 @@ struct cfq_data { unsigned int cfq_fifo_batch_expire; unsigned int cfq_back_penalty; unsigned int cfq_back_max; - unsigned int find_best_crq; - - unsigned int cfq_tagged; + unsigned int cfq_slice[2]; + unsigned int cfq_slice_async_rq; + unsigned int cfq_slice_idle; + unsigned int cfq_max_depth; }; +/* + * Per process-grouping structure + */ struct cfq_queue { /* reference count */ atomic_t ref; /* parent cfq_data */ struct cfq_data *cfqd; - /* hash of mergeable requests */ + /* cfqq lookup hash */ struct hlist_node cfq_hash; /* hash key */ - unsigned long key; - /* whether queue is on rr (or empty) list */ - int on_rr; + unsigned int key; /* on either rr or empty list of cfqd */ struct list_head cfq_list; /* sorted list of pending requests */ - struct rb_root sort_list; + struct rb_root sort_list[2]; + int dir; /* if fifo isn't expired, next request to serve */ - struct cfq_rq *next_crq; + struct cfq_rq *next_crq[2]; /* requests queued in sort_list */ int queued[2]; /* currently allocated requests */ @@ -162,17 +203,33 @@ struct cfq_queue { /* last time fifo expired */ unsigned long last_fifo_expire; - int key_type; - - unsigned long service_start; - unsigned long service_used; - - unsigned int max_rate; + unsigned long slice_start; + unsigned long slice_end; + unsigned long slice_left; + unsigned long service_last; /* number of requests that have been handed to the driver */ int in_flight; - /* number of currently allocated requests */ - int alloc_limit[2]; + + /* io prio of this group */ + int ioprio; + int org_ioprio; + short ioprio_class; + short org_ioprio_class; + + unsigned int idle_hit, idle_miss; + + /* whether queue is on rr (or empty) list */ + unsigned on_rr : 1; + /* idle slice, waiting for new request submission */ + unsigned wait_request : 1; + /* set when wait_request gets set, reset on first rq alloc */ + unsigned must_alloc : 1; + /* idle slice, request added, now waiting to dispatch it */ + unsigned must_dispatch : 1; + + unsigned idle_window : 1; + unsigned prio_changed : 1; }; struct cfq_rq { @@ -184,42 +241,20 @@ struct cfq_rq { struct cfq_queue *cfq_queue; struct cfq_io_context *io_context; - unsigned long service_start; - unsigned long queue_start; + sector_t end_pos; - unsigned int in_flight : 1; - unsigned int accounted : 1; - unsigned int is_sync : 1; - unsigned int is_write : 1; + unsigned in_flight : 1; + unsigned accounted : 1; + 
unsigned is_sync : 1; + unsigned requeued : 1; }; -static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned long); +static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int); static void cfq_dispatch_sort(request_queue_t *, struct cfq_rq *); static void cfq_update_next_crq(struct cfq_rq *); static void cfq_put_cfqd(struct cfq_data *cfqd); -/* - * what the fairness is based on (ie how processes are grouped and - * differentiated) - */ -static inline unsigned long -cfq_hash_key(struct cfq_data *cfqd, struct task_struct *tsk) -{ - /* - * optimize this so that ->key_type is the offset into the struct - */ - switch (cfqd->key_type) { - case CFQ_KEY_PGID: - return process_group(tsk); - default: - case CFQ_KEY_TGID: - return tsk->tgid; - case CFQ_KEY_UID: - return tsk->uid; - case CFQ_KEY_GID: - return tsk->gid; - } -} +#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE) /* * lots of deadline iosched dupes, can be abstracted later... @@ -243,8 +278,6 @@ static inline void cfq_add_crq_hash(stru { const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request)); - BUG_ON(!hlist_unhashed(&crq->hash)); - hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]); } @@ -257,8 +290,6 @@ static struct request *cfq_find_rq_hash( struct cfq_rq *crq = list_entry_hash(entry); struct request *__rq = crq->request; - BUG_ON(hlist_unhashed(&crq->hash)); - if (!rq_mergeable(__rq)) { cfq_del_crq_hash(crq); continue; @@ -287,36 +318,16 @@ cfq_choose_req(struct cfq_data *cfqd, st return crq2; if (crq2 == NULL) return crq1; + if (crq1->requeued) + return crq1; + if (crq2->requeued) + return crq2; s1 = crq1->request->sector; s2 = crq2->request->sector; last = cfqd->last_sector; -#if 0 - if (!list_empty(&cfqd->queue->queue_head)) { - struct list_head *entry = &cfqd->queue->queue_head; - unsigned long distance = ~0UL; - struct request *rq; - - while ((entry = entry->prev) != &cfqd->queue->queue_head) { - rq = list_entry_rq(entry); - - if (blk_barrier_rq(rq)) - break; - - if (distance < abs(s1 - rq->sector + rq->nr_sectors)) { - distance = abs(s1 - rq->sector +rq->nr_sectors); - last = rq->sector + rq->nr_sectors; - } - if (distance < abs(s2 - rq->sector + rq->nr_sectors)) { - distance = abs(s2 - rq->sector +rq->nr_sectors); - last = rq->sector + rq->nr_sectors; - } - } - } -#endif - /* * by definition, 1KiB is 2 sectors */ @@ -381,7 +392,7 @@ cfq_find_next_crq(struct cfq_data *cfqd, return NULL; if ((rbnext = rb_next(&last->rb_node)) == NULL) - rbnext = rb_first(&cfqq->sort_list); + rbnext = rb_first(&cfqq->sort_list[cfqq->dir]); rbprev = rb_prev(&last->rb_node); @@ -396,72 +407,59 @@ cfq_find_next_crq(struct cfq_data *cfqd, static void cfq_update_next_crq(struct cfq_rq *crq) { struct cfq_queue *cfqq = crq->cfq_queue; + int rw = crq->is_sync; - if (cfqq->next_crq == crq) - cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); + if (cfqq->next_crq[rw] == crq) + cfqq->next_crq[rw] = cfq_find_next_crq(cfqq->cfqd, cfqq, crq); } -static int cfq_check_sort_rr_list(struct cfq_queue *cfqq) +static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted) { - struct list_head *head = &cfqq->cfqd->rr_list; - struct list_head *next, *prev; - - /* - * list might still be ordered - */ - next = cfqq->cfq_list.next; - if (next != head) { - struct cfq_queue *cnext = list_entry_cfqq(next); + struct cfq_data *cfqd = cfqq->cfqd; + struct list_head *list, *entry; - if (cfqq->service_used > cnext->service_used) - return 1; - } + BUG_ON(!cfqq->on_rr); - prev = cfqq->cfq_list.prev; - if (prev != head) { - 
struct cfq_queue *cprev = list_entry_cfqq(prev); + list_del(&cfqq->cfq_list); - if (cfqq->service_used < cprev->service_used) - return 1; + if (cfq_class_rt(cfqq)) + list = &cfqd->cur_rr; + else if (cfq_class_idle(cfqq)) + list = &cfqd->idle_rr; + else { + /* + * if cfqq has requests in flight, don't allow it to be + * found in cfq_set_active_queue before it has finished them. + * this is done to increase fairness between a process that + * has lots of io pending vs one that only generates one + * sporadically or synchronously + */ + if (cfqq->in_flight) + list = &cfqd->busy_rr; + else + list = &cfqd->rr_list[cfqq->ioprio]; } - return 0; -} - -static void cfq_sort_rr_list(struct cfq_queue *cfqq, int new_queue) -{ - struct list_head *entry = &cfqq->cfqd->rr_list; - - if (!cfqq->on_rr) - return; - if (!new_queue && !cfq_check_sort_rr_list(cfqq)) + /* + * if queue was preempted, just add to front to be fair. busy_rr + * isn't sorted. + */ + if (preempted || list == &cfqd->busy_rr) { + list_add(&cfqq->cfq_list, list); return; - - list_del(&cfqq->cfq_list); + } /* - * sort by our mean service_used, sub-sort by in-flight requests + * sort by when queue was last serviced */ - while ((entry = entry->prev) != &cfqq->cfqd->rr_list) { + entry = list; + while ((entry = entry->prev) != list) { struct cfq_queue *__cfqq = list_entry_cfqq(entry); - if (cfqq->service_used > __cfqq->service_used) + if (!__cfqq->service_last) + break; + if (time_before(__cfqq->service_last, cfqq->service_last)) break; - else if (cfqq->service_used == __cfqq->service_used) { - struct list_head *prv; - - while ((prv = entry->prev) != &cfqq->cfqd->rr_list) { - __cfqq = list_entry_cfqq(prv); - - WARN_ON(__cfqq->service_used > cfqq->service_used); - if (cfqq->service_used != __cfqq->service_used) - break; - if (cfqq->in_flight > __cfqq->in_flight) - break; - - entry = prv; - } - } } list_add(&cfqq->cfq_list, entry); @@ -469,28 +467,24 @@ static void cfq_sort_rr_list(struct cfq_ /* * add to busy list of queues for service, trying to be fair in ordering - * the pending list according to requests serviced + * the pending list according to last request service */ static inline void -cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) +cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq, int requeue) { - /* - * it's currently on the empty list - */ + BUG_ON(cfqq->on_rr); cfqq->on_rr = 1; cfqd->busy_queues++; - if (time_after(jiffies, cfqq->service_start + cfq_service)) - cfqq->service_used >>= 3; - - cfq_sort_rr_list(cfqq, 1); + cfq_resort_rr_list(cfqq, requeue); } static inline void cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq) { - list_move(&cfqq->cfq_list, &cfqd->empty_list); + BUG_ON(!cfqq->on_rr); cfqq->on_rr = 0; + list_move(&cfqq->cfq_list, &cfqd->empty_list); BUG_ON(!cfqd->busy_queues); cfqd->busy_queues--; @@ -507,14 +501,14 @@ static inline void cfq_del_crq_rb(struct struct cfq_data *cfqd = cfqq->cfqd; BUG_ON(!cfqq->queued[crq->is_sync]); + cfqq->queued[crq->is_sync]--; cfq_update_next_crq(crq); - cfqq->queued[crq->is_sync]--; - rb_erase(&crq->rb_node, &cfqq->sort_list); + rb_erase(&crq->rb_node, &cfqq->sort_list[crq->is_sync]); RB_CLEAR_COLOR(&crq->rb_node); - if (RB_EMPTY(&cfqq->sort_list) && cfqq->on_rr) + if (cfqq->on_rr && !(cfqq->queued[0] + cfqq->queued[1])) cfq_del_cfqq_rr(cfqd, cfqq); } } @@ -522,7 +516,7 @@ static inline void cfq_del_crq_rb(struct static struct cfq_rq * __cfq_add_crq_rb(struct cfq_rq *crq) { - struct rb_node **p = &crq->cfq_queue->sort_list.rb_node; + struct 
rb_node **p = &crq->cfq_queue->sort_list[crq->is_sync].rb_node; struct rb_node *parent = NULL; struct cfq_rq *__crq; @@ -548,6 +542,7 @@ static void cfq_add_crq_rb(struct cfq_rq struct cfq_data *cfqd = cfqq->cfqd; struct request *rq = crq->request; struct cfq_rq *__alias; + int dir = crq->is_sync; crq->rb_key = rq_rb_key(rq); cfqq->queued[crq->is_sync]++; @@ -559,22 +554,22 @@ static void cfq_add_crq_rb(struct cfq_rq while ((__alias = __cfq_add_crq_rb(crq)) != NULL) cfq_dispatch_sort(cfqd->queue, __alias); - rb_insert_color(&crq->rb_node, &cfqq->sort_list); + rb_insert_color(&crq->rb_node, &cfqq->sort_list[dir]); if (!cfqq->on_rr) - cfq_add_cfqq_rr(cfqd, cfqq); + cfq_add_cfqq_rr(cfqd, cfqq, crq->requeued); /* * check if this request is a better next-serve candidate */ - cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq); + cfqq->next_crq[dir] = cfq_choose_req(cfqd, cfqq->next_crq[dir], crq); } static inline void cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq) { if (ON_RB(&crq->rb_node)) { - rb_erase(&crq->rb_node, &cfqq->sort_list); + rb_erase(&crq->rb_node, &cfqq->sort_list[crq->is_sync]); cfqq->queued[crq->is_sync]--; } @@ -582,16 +577,16 @@ cfq_reposition_crq_rb(struct cfq_queue * } static struct request * -cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector) +cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector, int rw) + { - const unsigned long key = cfq_hash_key(cfqd, current); - struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, key); + struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid); struct rb_node *n; if (!cfqq) goto out; - n = cfqq->sort_list.rb_node; + n = cfqq->sort_list[rw].rb_node; while (n) { struct cfq_rq *crq = rb_entry_crq(n); @@ -610,24 +605,35 @@ out: /* * make sure the service time gets corrected on reissue of this request */ +static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq); static void cfq_requeue_request(request_queue_t *q, struct request *rq) { + struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_rq *crq = RQ_DATA(rq); if (crq) { struct cfq_queue *cfqq = crq->cfq_queue; - if (cfqq->cfqd->cfq_tagged) { - cfqq->service_used--; - cfq_sort_rr_list(cfqq, 0); - } - if (crq->accounted) { crq->accounted = 0; - cfqq->cfqd->rq_in_driver--; + WARN_ON(!cfqd->rq_in_driver); + cfqd->rq_in_driver--; + } + if (crq->in_flight) { + crq->in_flight = 0; + WARN_ON(!cfqq->in_flight); + cfqq->in_flight--; } } - list_add(&rq->queuelist, &q->queue_head); + + if (blk_fs_request(rq)) { + struct cfq_queue *cfqq = crq->cfq_queue; + + cfqq->next_crq[crq->is_sync] = crq; + crq->requeued = 1; + cfq_enqueue(cfqd, rq); + } else + list_add(&rq->queuelist, &q->queue_head); } static void cfq_remove_request(request_queue_t *q, struct request *rq) @@ -637,9 +643,8 @@ static void cfq_remove_request(request_q if (crq) { cfq_remove_merge_hints(q, crq); list_del_init(&rq->queuelist); + cfq_del_crq_rb(crq); - if (crq->cfq_queue) - cfq_del_crq_rb(crq); } } @@ -647,6 +652,7 @@ static int cfq_merge(request_queue_t *q, struct request **req, struct bio *bio) { struct cfq_data *cfqd = q->elevator->elevator_data; + const int rw = bio_data_dir(bio); struct request *__rq; int ret; @@ -657,21 +663,15 @@ cfq_merge(request_queue_t *q, struct req } __rq = cfq_find_rq_hash(cfqd, bio->bi_sector); - if (__rq) { - BUG_ON(__rq->sector + __rq->nr_sectors != bio->bi_sector); - - if (elv_rq_merge_ok(__rq, bio)) { - ret = ELEVATOR_BACK_MERGE; - goto out; - } + if (__rq && elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_BACK_MERGE; + goto out; } - __rq = 
cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio)); - if (__rq) { - if (elv_rq_merge_ok(__rq, bio)) { - ret = ELEVATOR_FRONT_MERGE; - goto out; - } + __rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio), rw); + if (__rq && elv_rq_merge_ok(__rq, bio)) { + ret = ELEVATOR_FRONT_MERGE; + goto out; } return ELEVATOR_NO_MERGE; @@ -704,22 +704,201 @@ static void cfq_merged_requests(request_queue_t *q, struct request *rq, struct request *next) { - struct cfq_rq *crq = RQ_DATA(rq); struct cfq_rq *cnext = RQ_DATA(next); cfq_merged_request(q, rq); - if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { - if (time_before(cnext->queue_start, crq->queue_start)) { - list_move(&rq->queuelist, &next->queuelist); - crq->queue_start = cnext->queue_start; - } - } + /* + * reposition in fifo if next is older than rq + */ + if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && + time_before(next->start_time, rq->start_time)) + list_move(&rq->queuelist, &next->queuelist); cfq_update_next_crq(cnext); cfq_remove_request(q, next); } +static inline void +__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + if (cfqq) { + cfqq->slice_start = jiffies; + cfqq->slice_end = 0; + cfqq->slice_left = 0; + del_timer(&cfqd->idle_class_timer); + + cfqq->dir ^= 1; + if (RB_EMPTY(&cfqq->sort_list[cfqq->dir])) + cfqq->dir ^= 1; + } + + cfqd->active_queue = cfqq; +} + +/* + * 0 + * 0,1 + * 0,1,2 + * 0,1,2,3 + * 0,1,2,3,4 + * 0,1,2,3,4,5 + * 0,1,2,3,4,5,6 + * 0,1,2,3,4,5,6,7 + */ +static int cfq_get_next_prio_level(struct cfq_data *cfqd) +{ + int prio, wrap; + + prio = -1; + wrap = 0; + do { + int p; + + for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) { + if (!list_empty(&cfqd->rr_list[p])) { + prio = p; + break; + } + } + + if (prio != -1) + break; + cfqd->cur_prio = 0; + if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) { + cfqd->cur_end_prio = 0; + if (wrap) + break; + wrap = 1; + } + } while (1); + + if (unlikely(prio == -1)) + return -1; + + BUG_ON(prio >= CFQ_PRIO_LISTS); + + list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr); + + cfqd->cur_prio = prio + 1; + if (cfqd->cur_prio > cfqd->cur_end_prio) { + cfqd->cur_end_prio = cfqd->cur_prio; + cfqd->cur_prio = 0; + } + if (cfqd->cur_end_prio > cfqd->end_prio) + cfqd->end_prio = cfqd->cur_end_prio; + if (cfqd->end_prio == CFQ_PRIO_LISTS) { + cfqd->cur_prio = 0; + cfqd->cur_end_prio = 0; + cfqd->end_prio = 0; + } + + return prio; +} + +static void cfq_set_active_queue(struct cfq_data *cfqd) +{ + struct cfq_queue *cfqq = NULL; + + /* + * if current list is non-empty, grab first entry. 
if it is empty, + * get next prio level and grab first entry then if any are spliced + */ + if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) + cfqq = list_entry_cfqq(cfqd->cur_rr.next); + + /* + * if we have idle queues and no rt or be queues had pending + * requests, either allow immediate service if the grace period + * has passed or arm the idle grace timer + */ + if (!cfqq && !list_empty(&cfqd->idle_rr)) { + unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE; + + if (time_after_eq(jiffies, end)) + cfqq = list_entry_cfqq(cfqd->idle_rr.next); + else + mod_timer(&cfqd->idle_class_timer, end); + } + + __cfq_set_active_queue(cfqd, cfqq); +} + +/* + * current cfqq expired its slice (or was too idle), select new one + */ +static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted) +{ + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfqq) { + unsigned long now = jiffies; + + if (cfqq->wait_request) + del_timer(&cfqd->idle_slice_timer); + + if (!preempted && !cfqq->in_flight) + cfqq->service_last = now; + + cfqq->must_dispatch = 0; + cfqq->wait_request = 0; + + /* + * store what was left of this slice, if the queue idled out + * or was preempted + */ + if (time_after(now, cfqq->slice_end)) + cfqq->slice_left = now - cfqq->slice_end; + else + cfqq->slice_left = 0; + + if (cfqq->on_rr) + cfq_resort_rr_list(cfqq, preempted); + + cfqd->active_queue = NULL; + + if (cfqd->active_cic) { + put_io_context(cfqd->active_cic->ioc); + cfqd->active_cic = NULL; + } + } + + cfqd->dispatch_slice = 0; +} + +static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq) + +{ + WARN_ON(!RB_EMPTY(&cfqq->sort_list[cfqq->dir])); + WARN_ON(cfqq != cfqd->active_queue); + + /* + * idle is disabled, either manually or by past process history + */ + if (!cfqd->cfq_slice_idle) + return 0; + if (!cfqq->idle_window) + return 0; + /* + * task has exited, don't wait + */ + if (cfqd->active_cic && !cfqd->active_cic->ioc->task) + return 0; + + cfqq->wait_request = 1; + cfqq->must_alloc = 1; + + if (!timer_pending(&cfqd->idle_slice_timer)) { + unsigned long slice_left = cfqq->slice_end - 1; + + cfqd->idle_start = jiffies; + cfqd->idle_slice_timer.expires = min(jiffies + cfqd->cfq_slice_idle, slice_left); + add_timer(&cfqd->idle_slice_timer); + } + + return 1; +} + /* * we dispatch cfqd->cfq_quantum requests in total from the rr_list queues, * this function sector sorts the selected request to minimize seeks. 
we start @@ -733,31 +912,34 @@ static void cfq_dispatch_sort(request_qu struct request *__rq; sector_t last; - cfq_del_crq_rb(crq); - cfq_remove_merge_hints(q, crq); list_del(&crq->request->queuelist); last = cfqd->last_sector; - while ((entry = entry->prev) != head) { - __rq = list_entry_rq(entry); - - if (blk_barrier_rq(crq->request)) + list_for_each_entry_reverse(__rq, head, queuelist) { + if (blk_barrier_rq(__rq)) break; - if (!blk_fs_request(crq->request)) + if (!blk_fs_request(__rq)) break; - if (crq->request->sector > __rq->sector) + if (__rq->sector <= crq->request->sector) break; if (__rq->sector > last && crq->request->sector < last) { - last = crq->request->sector; + last = crq->request->sector + crq->request->nr_sectors; break; } + entry = &__rq->queuelist; } cfqd->last_sector = last; + + cfq_del_crq_rb(crq); + cfq_remove_merge_hints(q, crq); + + crq->end_pos = crq->request->sector + crq->request->nr_sectors; crq->in_flight = 1; + crq->requeued = 0; cfqq->in_flight++; - list_add(&crq->request->queuelist, entry); + list_add_tail(&crq->request->queuelist, entry); } /* @@ -766,105 +948,170 @@ static void cfq_dispatch_sort(request_qu static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq) { struct cfq_data *cfqd = cfqq->cfqd; - const int reads = !list_empty(&cfqq->fifo[0]); - const int writes = !list_empty(&cfqq->fifo[1]); unsigned long now = jiffies; + struct request *rq; struct cfq_rq *crq; if (time_before(now, cfqq->last_fifo_expire + cfqd->cfq_fifo_batch_expire)) return NULL; - crq = RQ_DATA(list_entry(cfqq->fifo[0].next, struct request, queuelist)); - if (reads && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_r)) { - cfqq->last_fifo_expire = now; - return crq; - } - - crq = RQ_DATA(list_entry(cfqq->fifo[1].next, struct request, queuelist)); - if (writes && time_after(now, crq->queue_start + cfqd->cfq_fifo_expire_w)) { - cfqq->last_fifo_expire = now; - return crq; + if (!list_empty(&cfqq->fifo[cfqq->dir])) { + crq = RQ_DATA(list_entry_fifo(cfqq->fifo[cfqq->dir].next)); + rq = crq->request; + if (time_after(now, rq->start_time + cfqd->cfq_fifo_expire_r)) { + cfqq->last_fifo_expire = now; + return crq; + } } return NULL; } /* - * dispatch a single request from given queue + * Scale schedule slice based on io priority */ +static inline int cfq_prio_to_slice(struct cfq_data *cfqd, int prio, int sync) +{ + int base_slice = cfqd->cfq_slice[sync]; + int prio_index = IOPRIO_PRIO_DATA(prio); + + return base_slice + base_slice * (CFQ_PRIO_LISTS - 1 - prio_index); +} + static inline void -cfq_dispatch_request(request_queue_t *q, struct cfq_data *cfqd, - struct cfq_queue *cfqq) +cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq, int prio) { - struct cfq_rq *crq; + cfqq->slice_end = cfq_prio_to_slice(cfqd, prio, cfqq->dir) + jiffies; +} + +static inline int cfq_prio_to_maxrq(struct cfq_data *cfqd, int prio) +{ + int base_rq = cfqd->cfq_slice_async_rq; + int prio_index = IOPRIO_PRIO_DATA(prio); + + return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - prio_index)); +} + +/* + * get next queue for service + */ +static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd) +{ + unsigned long now = jiffies; + struct cfq_queue *cfqq; + + cfqq = cfqd->active_queue; + if (!cfqq) + goto new_queue; /* - * follow expired path, else get first next available + * slice has expired */ - if ((crq = cfq_check_fifo(cfqq)) == NULL) { - if (cfqd->find_best_crq) - crq = cfqq->next_crq; - else - crq = rb_entry_crq(rb_first(&cfqq->sort_list)); - } - - cfqd->last_sector = 
crq->request->sector + crq->request->nr_sectors; + if (!cfqq->must_dispatch && time_after(jiffies, cfqq->slice_end)) + goto new_queue; /* - * finally, insert request into driver list + * if queue has requests, dispatch one. if not, check if + * enough slice is left to wait for one */ - cfq_dispatch_sort(q, crq); + if (!RB_EMPTY(&cfqq->sort_list[cfqq->dir])) + goto keep_queue; + else if (cfqq->dir == CFQ_SYNC && time_before(now, cfqq->slice_end)) { + if (cfq_arm_slice_timer(cfqd, cfqq)) + return NULL; + } + +new_queue: + cfq_slice_expired(cfqd, 0); + cfq_set_active_queue(cfqd); +keep_queue: + return cfqd->active_queue; } -static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch) +static int +__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq, + int max_dispatch) { - struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_queue *cfqq; - struct list_head *entry, *tmp; - int queued, busy_queues, first_round; + int dispatched = 0, prio; - if (list_empty(&cfqd->rr_list)) - return 0; + BUG_ON(RB_EMPTY(&cfqq->sort_list[cfqq->dir])); - queued = 0; - first_round = 1; -restart: - busy_queues = 0; - list_for_each_safe(entry, tmp, &cfqd->rr_list) { - cfqq = list_entry_cfqq(entry); + prio = cfqq->ioprio | (cfqq->ioprio_class << IOPRIO_CLASS_SHIFT); + do { + struct cfq_rq *crq; - BUG_ON(RB_EMPTY(&cfqq->sort_list)); + /* + * follow expired path, else get first next available + */ + if ((crq = cfq_check_fifo(cfqq)) == NULL) + crq = cfqq->next_crq[cfqq->dir]; /* - * first round of queueing, only select from queues that - * don't already have io in-flight + * finally, insert request into driver dispatch list */ - if (first_round && cfqq->in_flight) - continue; + cfq_dispatch_sort(cfqd->queue, crq); - cfq_dispatch_request(q, cfqd, cfqq); + cfqd->dispatch_slice++; + dispatched++; - if (!RB_EMPTY(&cfqq->sort_list)) - busy_queues++; + if (!cfqd->active_cic) { + atomic_inc(&crq->io_context->ioc->refcount); + cfqd->active_cic = crq->io_context; + } - queued++; - } + if (RB_EMPTY(&cfqq->sort_list[cfqq->dir])) + break; + + } while (dispatched < max_dispatch); + + /* + * if slice end isn't set yet, set it. if at least one request was + * sync, use the sync time slice value + */ + if (!cfqq->slice_end) + cfq_set_prio_slice(cfqd, cfqq, prio); + + /* + * expire an async queue immediately if it has used up its slice. idle + * queue always expire after 1 dispatch round. 
+ */ + if ((cfqq->dir == CFQ_ASYNC && + cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, prio)) || + cfq_class_idle(cfqq)) + cfq_slice_expired(cfqd, 0); + + return dispatched; +} + +static int cfq_dispatch_requests(request_queue_t *q, int max_dispatch) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq; + + if (!cfqd->busy_queues) + return 0; - if ((queued < max_dispatch) && (busy_queues || first_round)) { - first_round = 0; - goto restart; + cfqq = cfq_select_queue(cfqd); + if (cfqq) { + cfqq->wait_request = 0; + cfqq->must_dispatch = 0; + del_timer(&cfqd->idle_slice_timer); + + if (cfq_class_idle(cfqq)) + max_dispatch = 1; + + return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch); } - return queued; + return 0; } static inline void cfq_account_dispatch(struct cfq_rq *crq) { struct cfq_queue *cfqq = crq->cfq_queue; struct cfq_data *cfqd = cfqq->cfqd; - unsigned long now, elapsed; - if (!blk_fs_request(crq->request)) + if (unlikely(!blk_fs_request(crq->request))) return; /* @@ -874,65 +1121,34 @@ static inline void cfq_account_dispatch( if (crq->accounted) return; - now = jiffies; - if (cfqq->service_start == ~0UL) - cfqq->service_start = now; - - /* - * on drives with tagged command queueing, command turn-around time - * doesn't necessarily reflect the time spent processing this very - * command inside the drive. so do the accounting differently there, - * by just sorting on the number of requests - */ - if (cfqd->cfq_tagged) { - if (time_after(now, cfqq->service_start + cfq_service)) { - cfqq->service_start = now; - cfqq->service_used /= 10; - } - - cfqq->service_used++; - cfq_sort_rr_list(cfqq, 0); - } - - elapsed = now - crq->queue_start; - if (elapsed > max_elapsed_dispatch) - max_elapsed_dispatch = elapsed; - crq->accounted = 1; - crq->service_start = now; - - if (++cfqd->rq_in_driver >= CFQ_MAX_TAG && !cfqd->cfq_tagged) { - cfqq->cfqd->cfq_tagged = 1; - printk("cfq: depth %d reached, tagging now on\n", CFQ_MAX_TAG); - } + cfqd->rq_in_driver++; } static inline void cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq) { struct cfq_data *cfqd = cfqq->cfqd; + unsigned long now; if (!crq->accounted) return; + now = jiffies; + WARN_ON(!cfqd->rq_in_driver); cfqd->rq_in_driver--; - if (!cfqd->cfq_tagged) { - unsigned long now = jiffies; - unsigned long duration = now - crq->service_start; + if (!cfq_class_idle(cfqq)) + cfqd->last_end_request = now; - if (time_after(now, cfqq->service_start + cfq_service)) { - cfqq->service_start = now; - cfqq->service_used >>= 3; - } - - cfqq->service_used += duration; - cfq_sort_rr_list(cfqq, 0); - - if (duration > max_elapsed_crq) - max_elapsed_crq = duration; + if (!cfqq->in_flight && cfqq->on_rr) { + cfqq->service_last = now; + cfq_resort_rr_list(cfqq, 0); } + + if (crq->is_sync) + crq->io_context->last_end_request = now; } static struct request *cfq_next_request(request_queue_t *q) @@ -945,7 +1161,15 @@ static struct request *cfq_next_request( dispatch: rq = list_entry_rq(q->queue_head.next); - if ((crq = RQ_DATA(rq)) != NULL) { + crq = RQ_DATA(rq); + if (crq) { + /* + * if idle window is disabled, allow queue buildup + */ + if (!crq->in_flight && !crq->cfq_queue->idle_window && + cfqd->rq_in_driver >= cfqd->cfq_max_depth) + return NULL; + cfq_remove_merge_hints(q, crq); cfq_account_dispatch(crq); } @@ -967,14 +1191,23 @@ dispatch: */ static void cfq_put_queue(struct cfq_queue *cfqq) { - BUG_ON(!atomic_read(&cfqq->ref)); + struct cfq_data *cfqd = cfqq->cfqd; + + BUG_ON(atomic_read(&cfqq->ref) <= 0); if 
(!atomic_dec_and_test(&cfqq->ref)) return; - BUG_ON(rb_first(&cfqq->sort_list)); + BUG_ON(rb_first(&cfqq->sort_list[READ])); + BUG_ON(rb_first(&cfqq->sort_list[WRITE])); + BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]); BUG_ON(cfqq->on_rr); + if (unlikely(cfqd->active_queue == cfqq)) { + cfq_slice_expired(cfqd, 0); + kblockd_schedule_work(&cfqd->unplug_work); + } + cfq_put_cfqd(cfqq->cfqd); /* @@ -986,7 +1219,7 @@ static void cfq_put_queue(struct cfq_que } static inline struct cfq_queue * -__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key, const int hashval) +__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, const int hashval) { struct hlist_head *hash_list = &cfqd->cfq_hash[hashval]; struct hlist_node *entry, *next; @@ -1002,94 +1235,242 @@ __cfq_find_cfq_hash(struct cfq_data *cfq } static struct cfq_queue * -cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned long key) +cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key) { return __cfq_find_cfq_hash(cfqd, key, hash_long(key, CFQ_QHASH_SHIFT)); } -static inline void -cfq_rehash_cfqq(struct cfq_data *cfqd, struct cfq_queue **cfqq, - struct cfq_io_context *cic) +static void cfq_free_io_context(struct cfq_io_context *cic) { - unsigned long hashkey = cfq_hash_key(cfqd, current); - unsigned long hashval = hash_long(hashkey, CFQ_QHASH_SHIFT); - struct cfq_queue *__cfqq; + kmem_cache_free(cfq_ioc_pool, cic); +} + +/* + * A note on the RCU usage in cfq: + * + * rcu is used to protect the task list of cfq_io_contexts for the various + * queues it is doing io against. in case of async write, the lookup of the + * io contexts may happen from a different process, but updates to the list + * always happen in the context of the process itself. So on the write side, + * we don't need additional locking. The read side is covered with + * rcu_read_lock() and using the proper rcu list iteration helpers + */ +static void cfq_rcu_queue_put(struct rcu_head *h) +{ + struct cfq_io_context *cic = container_of(h, struct cfq_io_context,rcu); + request_queue_t *q = cic->cfqq->cfqd->queue; unsigned long flags; - spin_lock_irqsave(cfqd->queue->queue_lock, flags); + spin_lock_irqsave(q->queue_lock, flags); + cfq_put_queue(cic->cfqq); + cic->cfqq = NULL; + spin_unlock_irqrestore(q->queue_lock, flags); + put_io_context(cic->ioc); +} - hlist_del(&(*cfqq)->cfq_hash); +/* + * Called with interrupts disabled, schedule rcu deletion of cic + */ +static void cfq_exit_single_io_context(struct cfq_io_context *cic) +{ + struct cfq_data *cfqd = cic->cfqq->cfqd; + request_queue_t *q = cfqd->queue; + + WARN_ON(!irqs_disabled()); - __cfqq = __cfq_find_cfq_hash(cfqd, hashkey, hashval); - if (!__cfqq || __cfqq == *cfqq) { - __cfqq = *cfqq; - hlist_add_head(&__cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); - __cfqq->key_type = cfqd->key_type; - } else { - atomic_inc(&__cfqq->ref); - cic->cfqq = __cfqq; - cfq_put_queue(*cfqq); - *cfqq = __cfqq; - } + spin_lock(q->queue_lock); - cic->cfqq = __cfqq; - spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); -} + if (unlikely(cic->cfqq == cfqd->active_queue)) { + cfq_slice_expired(cfqd, 0); + kblockd_schedule_work(&cfqd->unplug_work); + } -static void cfq_free_io_context(struct cfq_io_context *cic) -{ - kmem_cache_free(cfq_ioc_pool, cic); + spin_unlock(q->queue_lock); + call_rcu(&cic->rcu, cfq_rcu_queue_put); } /* - * locking hierarchy is: io_context lock -> queue locks + * Another task may update the task cic list, if it is doing a queue lookup + * on its behalf. 
cfq_cic_lock excludes such concurrent updates */ static void cfq_exit_io_context(struct cfq_io_context *cic) { - struct cfq_queue *cfqq = cic->cfqq; - struct list_head *entry = &cic->list; - request_queue_t *q; + struct cfq_io_context *__cic; + struct list_head *entry, *nxt; unsigned long flags; /* * put the reference this task is holding to the various queues */ - spin_lock_irqsave(&cic->ioc->lock, flags); - while ((entry = cic->list.next) != &cic->list) { - struct cfq_io_context *__cic; + spin_lock_irqsave(&cfq_cic_lock, flags); + list_for_each_safe_rcu(entry, nxt, &cic->list) { __cic = list_entry(entry, struct cfq_io_context, list); - list_del(entry); - q = __cic->cfqq->cfqd->queue; - spin_lock(q->queue_lock); - cfq_put_queue(__cic->cfqq); - spin_unlock(q->queue_lock); + list_del_rcu(&__cic->list); + cfq_exit_single_io_context(__cic); } - q = cfqq->cfqd->queue; - spin_lock(q->queue_lock); - cfq_put_queue(cfqq); - spin_unlock(q->queue_lock); - - cic->cfqq = NULL; - spin_unlock_irqrestore(&cic->ioc->lock, flags); + cfq_exit_single_io_context(cic); + spin_unlock_irqrestore(&cfq_cic_lock, flags); } -static struct cfq_io_context *cfq_alloc_io_context(int gfp_flags) +static struct cfq_io_context * +cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask) { - struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_flags); + struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask); if (cic) { - cic->dtor = cfq_free_io_context; - cic->exit = cfq_exit_io_context; INIT_LIST_HEAD(&cic->list); cic->cfqq = NULL; + cic->key = NULL; + INIT_RCU_HEAD(&cic->rcu); + cic->last_end_request = jiffies; + cic->ttime_total = 0; + cic->ttime_samples = 0; + cic->ttime_mean = 0; + cic->dtor = cfq_free_io_context; + cic->exit = cfq_exit_io_context; } return cic; } +static void cfq_init_prio_data(struct cfq_queue *cfqq) +{ + int ioprio_class; + + if (!cfqq->prio_changed) + return; + + ioprio_class = IOPRIO_PRIO_CLASS(current->ioprio); + switch (ioprio_class) { + default: + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); + case IOPRIO_CLASS_NONE: + /* + * no prio set, place us in the middle of the BE classes + */ + cfqq->ioprio = task_nice_ioprio(current); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_RT: + cfqq->ioprio = task_ioprio(current); + cfqq->ioprio_class = IOPRIO_CLASS_RT; + break; + case IOPRIO_CLASS_BE: + cfqq->ioprio = task_ioprio(current); + cfqq->ioprio_class = IOPRIO_CLASS_BE; + break; + case IOPRIO_CLASS_IDLE: + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; + cfqq->ioprio = 7; + cfqq->idle_window = 0; + break; + } + + /* + * keep track of original prio settings in case we have to temporarily + * elevate the priority of this queue + */ + cfqq->org_ioprio = cfqq->ioprio; + cfqq->org_ioprio_class = cfqq->ioprio_class; + + if (cfqq->on_rr) + cfq_resort_rr_list(cfqq, 0); + + cfqq->prio_changed = 0; +} + +static inline void changed_ioprio(struct cfq_queue *cfqq) +{ + if (cfqq) { + struct cfq_data *cfqd = cfqq->cfqd; + + spin_lock(cfqd->queue->queue_lock); + cfqq->prio_changed = 1; + cfq_init_prio_data(cfqq); + spin_unlock(cfqd->queue->queue_lock); + } +} + +/* + * callback from sys_ioprio_set, irqs are disabled + */ +static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio) +{ + struct cfq_io_context *cic = ioc->cic; + + changed_ioprio(cic->cfqq); + + rcu_read_lock(); + + list_for_each_entry_rcu(cic, &cic->list, list) + changed_ioprio(cic->cfqq); + + rcu_read_unlock(); + return 0; +} + +static struct cfq_queue * +cfq_get_queue(struct cfq_data 
*cfqd, unsigned int key, int gfp_mask) +{ + const int hashval = hash_long(key, CFQ_QHASH_SHIFT); + struct cfq_queue *cfqq, *new_cfqq = NULL; + +retry: + cfqq = __cfq_find_cfq_hash(cfqd, key, hashval); + + if (!cfqq) { + if (new_cfqq) { + cfqq = new_cfqq; + new_cfqq = NULL; + } else if (gfp_mask & __GFP_WAIT) { + spin_unlock_irq(cfqd->queue->queue_lock); + new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); + spin_lock_irq(cfqd->queue->queue_lock); + goto retry; + } else { + cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); + if (!cfqq) + goto out; + } + + memset(cfqq, 0, sizeof(*cfqq)); + + INIT_HLIST_NODE(&cfqq->cfq_hash); + INIT_LIST_HEAD(&cfqq->cfq_list); + RB_CLEAR_ROOT(&cfqq->sort_list[0]); + RB_CLEAR_ROOT(&cfqq->sort_list[1]); + INIT_LIST_HEAD(&cfqq->fifo[0]); + INIT_LIST_HEAD(&cfqq->fifo[1]); + + cfqq->dir = CFQ_SYNC; + cfqq->key = key; + hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); + atomic_set(&cfqq->ref, 0); + cfqq->cfqd = cfqd; + atomic_inc(&cfqd->ref); + cfqq->service_last = 0; + /* + * set ->slice_left to allow preemption for a new process + */ + cfqq->slice_left = 2 * cfqd->cfq_slice_idle; + cfqq->idle_window = 1; + cfqq->ioprio = -1; + cfqq->ioprio_class = -1; + cfqq->prio_changed = 1; + } + + if (new_cfqq) + kmem_cache_free(cfq_pool, new_cfqq); + + atomic_inc(&cfqq->ref); +out: + WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); + return cfqq; +} + /* * Setup general io context and cfq io context. There can be several cfq * io contexts per general io context, if this process is doing io to more @@ -1097,39 +1478,81 @@ static struct cfq_io_context *cfq_alloc_ * cfqq, so we don't need to worry about it disappearing */ static struct cfq_io_context * -cfq_get_io_context(struct cfq_queue **cfqq, int gfp_flags) +cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask) { - struct cfq_data *cfqd = (*cfqq)->cfqd; - struct cfq_queue *__cfqq = *cfqq; + struct io_context *ioc = NULL; struct cfq_io_context *cic; - struct io_context *ioc; + unsigned long flags; + int same_process = 1; - might_sleep_if(gfp_flags & __GFP_WAIT); + might_sleep_if(gfp_mask & __GFP_WAIT); - ioc = get_io_context(gfp_flags); - if (!ioc) - return NULL; + /* + * this only happens once in a process life time, so grabbing the + * global tasklist_lock should not be a contention point. + * + * locate a specific task, if this process is doing async io on + * behalf of someone else. 
we need to find its io_context, or set one + * up if it doesn't exist + */ + if (pid != current->pid) { + struct io_context *new_ioc; + struct task_struct *p; + + same_process = 0; + new_ioc = alloc_io_context(gfp_mask); + ioc = NULL; + + read_lock_irqsave(&tasklist_lock, flags); + p = find_task_by_pid(pid); + if (p) { + if (p->io_context) { + ioc = p->io_context; + atomic_inc(&ioc->refcount); + } else if (new_ioc) { + p->io_context = new_ioc; + atomic_inc(&new_ioc->refcount); + new_ioc->pid = p->pid; + new_ioc->task = p; + new_ioc = NULL; + } + } + read_unlock_irqrestore(&tasklist_lock, flags); + + if (new_ioc) + free_io_context(new_ioc); + } + + if (!ioc) { + ioc = get_io_context(gfp_mask); + if (!ioc) + return NULL; + } if ((cic = ioc->cic) == NULL) { - cic = cfq_alloc_io_context(gfp_flags); + cic = cfq_alloc_io_context(cfqd, gfp_mask); if (cic == NULL) goto err; + /* + * manually increment generic io_context usage count, it + * cannot go away since we are already holding one ref to it + */ ioc->cic = cic; + ioc->set_ioprio = cfq_ioc_set_ioprio; cic->ioc = ioc; - cic->cfqq = __cfqq; - atomic_inc(&__cfqq->ref); + atomic_inc(&ioc->refcount); + + cic->key = cfqd; + atomic_inc(&cfqd->ref); } else { struct cfq_io_context *__cic; - unsigned long flags; /* - * since the first cic on the list is actually the head - * itself, need to check this here or we'll duplicate an - * cic per ioc for no reason + * the first cic on the list is actually the head itself */ - if (cic->cfqq == __cfqq) + if (cic->key == cfqd) goto out; /* @@ -1137,117 +1560,237 @@ cfq_get_io_context(struct cfq_queue **cf * should be ok here, the list will usually not be more than * 1 or a few entries long */ - spin_lock_irqsave(&ioc->lock, flags); - list_for_each_entry(__cic, &cic->list, list) { + rcu_read_lock(); + list_for_each_entry_rcu(__cic, &cic->list, list) { /* * this process is already holding a reference to * this queue, so no need to get one more */ - if (__cic->cfqq == __cfqq) { + if (__cic->key == cfqd) { + rcu_read_unlock(); cic = __cic; - spin_unlock_irqrestore(&ioc->lock, flags); goto out; } } - spin_unlock_irqrestore(&ioc->lock, flags); + rcu_read_unlock(); /* * nope, process doesn't have a cic assoicated with this * cfqq yet. 
get a new one and add to list */ - __cic = cfq_alloc_io_context(gfp_flags); + __cic = cfq_alloc_io_context(cfqd, gfp_mask); if (__cic == NULL) goto err; __cic->ioc = ioc; - __cic->cfqq = __cfqq; - atomic_inc(&__cfqq->ref); - spin_lock_irqsave(&ioc->lock, flags); - list_add(&__cic->list, &cic->list); - spin_unlock_irqrestore(&ioc->lock, flags); + atomic_inc(&ioc->refcount); + + __cic->key = cfqd; + atomic_inc(&cfqd->ref); + + /* + * minor optimization - we only need to grab the global + * cic lock if it isn't the process itself setting up the cic + */ + if (!same_process) { + spin_lock_irqsave(&cfq_cic_lock, flags); + list_add_rcu(&__cic->list, &cic->list); + spin_unlock_irqrestore(&cfq_cic_lock, flags); + } else + list_add_rcu(&__cic->list, &cic->list); cic = __cic; - *cfqq = __cfqq; } out: - /* - * if key_type has been changed on the fly, we lazily rehash - * each queue at lookup time - */ - if ((*cfqq)->key_type != cfqd->key_type) - cfq_rehash_cfqq(cfqd, cfqq, cic); - return cic; err: put_io_context(ioc); return NULL; } -static struct cfq_queue * -__cfq_get_queue(struct cfq_data *cfqd, unsigned long key, int gfp_mask) +static void +cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic) { - const int hashval = hash_long(key, CFQ_QHASH_SHIFT); - struct cfq_queue *cfqq, *new_cfqq = NULL; + unsigned long elapsed, ttime; -retry: - cfqq = __cfq_find_cfq_hash(cfqd, key, hashval); + /* + * if this context already has stuff queued, thinktime is from + * last queue not last end + */ +#if 0 + if (time_after(cic->last_end_request, cic->last_queue)) + elapsed = jiffies - cic->last_end_request; + else + elapsed = jiffies - cic->last_queue; +#else + elapsed = jiffies - cic->last_end_request; +#endif - if (!cfqq) { - if (new_cfqq) { - cfqq = new_cfqq; - new_cfqq = NULL; - } else if (gfp_mask & __GFP_WAIT) { - spin_unlock_irq(cfqd->queue->queue_lock); - new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask); - spin_lock_irq(cfqd->queue->queue_lock); - goto retry; - } else - goto out; + ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle); - memset(cfqq, 0, sizeof(*cfqq)); + cic->ttime_samples = (7*cic->ttime_samples + 256) / 8; + cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8; + cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples; +} - INIT_HLIST_NODE(&cfqq->cfq_hash); - INIT_LIST_HEAD(&cfqq->cfq_list); - RB_CLEAR_ROOT(&cfqq->sort_list); - INIT_LIST_HEAD(&cfqq->fifo[0]); - INIT_LIST_HEAD(&cfqq->fifo[1]); +#define sample_valid(samples) ((samples) > 80) - cfqq->key = key; - hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]); - atomic_set(&cfqq->ref, 0); - cfqq->cfqd = cfqd; - atomic_inc(&cfqd->ref); - cfqq->key_type = cfqd->key_type; - cfqq->service_start = ~0UL; - } +/* + * Disable idle window if the process thinks too long or seeks so much that + * it doesn't matter + */ +static void +cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_io_context *cic) +{ + int enable_idle = 0; - if (new_cfqq) - kmem_cache_free(cfq_pool, new_cfqq); + if (!cic->ioc->task) + goto disable; - atomic_inc(&cfqq->ref); -out: - WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq); - return cfqq; + if (sample_valid(cic->ttime_samples) && + cic->ttime_mean > cfqd->cfq_slice_idle) + goto disable; + + if (!cfqd->cfq_slice_idle) + goto disable; + + enable_idle = 1; +disable: + cfqq->idle_window = enable_idle; } -static void cfq_enqueue(struct cfq_data *cfqd, struct cfq_rq *crq) + +/* + * Check if new_cfqq should preempt the currently active queue. 
Return 0 for + * no or if we aren't sure, a 1 will cause a preempt. + */ +static int +cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq, + struct cfq_rq *crq) { - crq->is_sync = 0; - if (rq_data_dir(crq->request) == READ || current->flags & PF_SYNCWRITE) - crq->is_sync = 1; + struct cfq_queue *cfqq = cfqd->active_queue; + + if (cfq_class_idle(new_cfqq)) + return 0; + + if (!cfqq) + return 1; + + if (cfq_class_idle(cfqq)) + return 1; + if (!new_cfqq->wait_request) + return 0; + /* + * if it doesn't have slice left, forget it + */ + if (new_cfqq->slice_left < cfqd->cfq_slice_idle) + return 0; + if (crq->is_sync && cfqq->dir == CFQ_ASYNC) + return 1; + + return 0; +} + +/* + * cfqq preempts the active queue. if we allowed preempt with no slice left, + * let it have half of its nominal slice. + */ +static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + struct cfq_queue *__cfqq, *next; + + list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list) + cfq_resort_rr_list(__cfqq, 1); + + if (!cfqq->slice_left) + cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq->ioprio, cfqq->dir) / 2; + + cfqq->slice_end = cfqq->slice_left + jiffies; + cfq_slice_expired(cfqd, 1); + __cfq_set_active_queue(cfqd, cfqq); +} + +/* + * should really be a ll_rw_blk.c helper + */ +static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq) +{ + request_queue_t *q = cfqd->queue; + + if (!blk_queue_plugged(q)) + q->request_fn(q); + else + __generic_unplug_device(q); +} + +/* + * Called when a new fs request (crq) is added (to cfqq). Check if there's + * something we should do about it + */ +static void +cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct cfq_rq *crq) +{ + if (crq->is_sync) { + struct cfq_io_context *cic = crq->io_context; + + cfq_update_io_thinktime(cfqd, cic); + cfq_update_idle_window(cfqd, cfqq, cic); + + cic->last_queue = jiffies; + } + + if (cfqq == cfqd->active_queue) { + /* + * if we are waiting for a request for this queue, let it rip + * immediately and flag that we must not expire this queue + * just now + */ + if (cfqq->wait_request) { + cfqq->idle_hit++; + cfqq->must_dispatch = 1; + del_timer(&cfqd->idle_slice_timer); + cfq_start_queueing(cfqd, cfqq); + } + } else if (cfq_should_preempt(cfqd, cfqq, crq)) { + /* + * not the active queue - expire current slice if it is + * idle and has expired it's mean thinktime or this new queue + * has some old slice time left and is of higher priority + */ + cfq_preempt_queue(cfqd, cfqq); + cfqq->must_dispatch = 1; + cfq_start_queueing(cfqd, cfqq); + } +} + +static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq) +{ + struct cfq_rq *crq = RQ_DATA(rq); + struct cfq_queue *cfqq = crq->cfq_queue; + + cfq_init_prio_data(cfqq); cfq_add_crq_rb(crq); - crq->queue_start = jiffies; - list_add_tail(&crq->request->queuelist, &crq->cfq_queue->fifo[crq->is_sync]); + list_add_tail(&rq->queuelist, &cfqq->fifo[crq->is_sync]); + + if (rq_mergeable(rq)) { + cfq_add_crq_hash(cfqd, crq); + + if (!cfqd->queue->last_merge) + cfqd->queue->last_merge = rq; + } + + cfq_crq_enqueued(cfqd, cfqq, crq); } static void cfq_insert_request(request_queue_t *q, struct request *rq, int where) { struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_rq *crq = RQ_DATA(rq); switch (where) { case ELEVATOR_INSERT_BACK: @@ -1260,26 +1803,19 @@ cfq_insert_request(request_queue_t *q, s break; case ELEVATOR_INSERT_SORT: BUG_ON(!blk_fs_request(rq)); - cfq_enqueue(cfqd, crq); + cfq_enqueue(cfqd, rq); 
break; default: printk("%s: bad insert point %d\n", __FUNCTION__,where); return; } - - if (rq_mergeable(rq)) { - cfq_add_crq_hash(cfqd, crq); - - if (!q->last_merge) - q->last_merge = rq; - } } static int cfq_queue_empty(request_queue_t *q) { struct cfq_data *cfqd = q->elevator->elevator_data; - return list_empty(&q->queue_head) && list_empty(&cfqd->rr_list); + return list_empty(&q->queue_head) && !cfqd->busy_queues; } static void cfq_completed_request(request_queue_t *q, struct request *rq) @@ -1324,36 +1860,119 @@ cfq_latter_request(request_queue_t *q, s return NULL; } -static int cfq_may_queue(request_queue_t *q, int rw) +/* + * we temporarily boost lower priority queues if they are holding fs exclusive + * resources. they are boosted to normal prio (CLASS_BE/4) + */ +static void cfq_prio_boost(struct cfq_queue *cfqq) { - struct cfq_data *cfqd = q->elevator->elevator_data; - struct cfq_queue *cfqq; - int ret = ELV_MQUEUE_MAY; + const int ioprio_class = cfqq->ioprio_class; + const int ioprio = cfqq->ioprio; - if (current->flags & PF_MEMALLOC) - return ELV_MQUEUE_MAY; + if (has_fs_excl()) { + /* + * boost idle prio on transactions that would lock out other + * users of the filesystem + */ + if (cfq_class_idle(cfqq)) + cfqq->ioprio_class = IOPRIO_CLASS_BE; + if (cfqq->ioprio > IOPRIO_NORM) + cfqq->ioprio = IOPRIO_NORM; + } else { + /* + * check if we need to unboost the queue + */ + if (cfqq->ioprio_class != cfqq->org_ioprio_class) + cfqq->ioprio_class = cfqq->org_ioprio_class; + if (cfqq->ioprio != cfqq->org_ioprio) + cfqq->ioprio = cfqq->org_ioprio; + } - cfqq = cfq_find_cfq_hash(cfqd, cfq_hash_key(cfqd, current)); - if (cfqq) { - int limit = cfqd->max_queued; + /* + * refile between round-robin lists if we moved the priority class + */ + if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) && + cfqq->on_rr) + cfq_resort_rr_list(cfqq, 0); +} - if (cfqq->allocated[rw] < cfqd->cfq_queued) - return ELV_MQUEUE_MUST; +static inline pid_t __cfq_queue_pid(struct task_struct *task, struct bio *bio) +{ + pid_t pid = 0; - if (cfqd->busy_queues) - limit = q->nr_requests / cfqd->busy_queues; + if (bio && bio_data_dir(bio) == WRITE && !process_sync(task)) + pid = bio_dirty_pid(bio); + if (!pid) + pid = task->pid; - if (limit < cfqd->cfq_queued) - limit = cfqd->cfq_queued; - else if (limit > cfqd->max_queued) - limit = cfqd->max_queued; + return pid; +} - if (cfqq->allocated[rw] >= limit) { - if (limit > cfqq->alloc_limit[rw]) - cfqq->alloc_limit[rw] = limit; +static inline pid_t cfq_queue_pid(struct task_struct *task, struct bio *bio) +{ + if (!task_is_pdflush(task)) + return __cfq_queue_pid(task, bio); - ret = ELV_MQUEUE_NO; - } + return task->pid; +} + +static inline int +__cfq_cfqq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq, + struct task_struct *task, int rw) +{ + if (!cfqq || task->flags & PF_MEMALLOC) + return ELV_MQUEUE_MAY; + if (!cfqq->allocated[rw] || cfqq->must_alloc) { + /* + * even though it's early to clear it here, we will only + * fail to allocate a request on oom and the process could + * call elv_may_queue() hundreds of times before the flag + * gets cleared in cfq_set_request() + */ + cfqq->must_alloc = 0; + return ELV_MQUEUE_MUST; + } + if (cfqq->allocated[rw] >= cfqd->max_queued || cfq_class_idle(cfqq)) + return ELV_MQUEUE_NO; + + return ELV_MQUEUE_MAY; +} + +static int cfq_may_queue(request_queue_t *q, int rw, struct bio *bio) +{ + struct cfq_data *cfqd = q->elevator->elevator_data; + struct cfq_queue *cfqq, *rcfqq = NULL; + struct task_struct *task = 
current; + int ret = ELV_MQUEUE_MAY, r; + + /* + * don't force setup of a queue from here, as a call to may_queue + * does not necessarily imply that a request actually will be queued. + * so just lookup a possibly existing queue, or return 'may queue' + * if that fails + */ + cfqq = cfq_find_cfq_hash(cfqd, cfq_queue_pid(task, bio)); + if (cfqq) { + cfq_init_prio_data(cfqq); + cfq_prio_boost(cfqq); + + ret = __cfq_cfqq_may_queue(cfqd, cfqq, task, rw); + if (ret != ELV_MQUEUE_NO || rw == READ) + return ret; + + /* + * for MUST and NO, check if the real queue agrees + */ + rcfqq = cfq_find_cfq_hash(cfqd, task->pid); + if (rcfqq == cfqq) + return ret; + else if (!rcfqq) + return ELV_MQUEUE_MAY; + + cfq_init_prio_data(rcfqq); + r = __cfq_cfqq_may_queue(cfqd, rcfqq, task, rw); + if (r == ret) + return ret; } return ret; @@ -1361,14 +1980,20 @@ static int cfq_may_queue(request_queue_t static void cfq_check_waiters(request_queue_t *q, struct cfq_queue *cfqq) { + struct cfq_data *cfqd = q->elevator->elevator_data; struct request_list *rl = &q->rq; - const int write = waitqueue_active(&rl->wait[WRITE]); - const int read = waitqueue_active(&rl->wait[READ]); - if (read && cfqq->allocated[READ] < cfqq->alloc_limit[READ]) - wake_up(&rl->wait[READ]); - if (write && cfqq->allocated[WRITE] < cfqq->alloc_limit[WRITE]) - wake_up(&rl->wait[WRITE]); + if (cfqq->allocated[READ] <= cfqd->max_queued || cfqd->rq_starved) { + smp_mb(); + if (waitqueue_active(&rl->wait[READ])) + wake_up(&rl->wait[READ]); + } + + if (cfqq->allocated[WRITE] <= cfqd->max_queued || cfqd->rq_starved) { + smp_mb(); + if (waitqueue_active(&rl->wait[WRITE])) + wake_up(&rl->wait[WRITE]); + } } /* @@ -1381,69 +2006,63 @@ static void cfq_put_request(request_queu if (crq) { struct cfq_queue *cfqq = crq->cfq_queue; + const int rw = rq_data_dir(rq); - BUG_ON(q->last_merge == rq); - BUG_ON(!hlist_unhashed(&crq->hash)); + BUG_ON(!cfqq->allocated[rw]); + cfqq->allocated[rw]--; - if (crq->io_context) - put_io_context(crq->io_context->ioc); - - BUG_ON(!cfqq->allocated[crq->is_write]); - cfqq->allocated[crq->is_write]--; + put_io_context(crq->io_context->ioc); mempool_free(crq, cfqd->crq_pool); rq->elevator_private = NULL; - smp_mb(); cfq_check_waiters(q, cfqq); cfq_put_queue(cfqq); } } /* - * Allocate cfq data structures associated with this request. A queue and + * Allocate cfq data structures associated with this request. 
*/ -static int cfq_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +static int +cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio, + int gfp_mask) { struct cfq_data *cfqd = q->elevator->elevator_data; struct cfq_io_context *cic; const int rw = rq_data_dir(rq); - struct cfq_queue *cfqq, *saved_cfqq; + struct cfq_queue *cfqq; struct cfq_rq *crq; unsigned long flags; might_sleep_if(gfp_mask & __GFP_WAIT); + /* + * use __cfq_queue_pid() here to get the actual queue, regardless of + * whether this is pdflush or not + */ + cic = cfq_get_io_context(cfqd, __cfq_queue_pid(current, bio), gfp_mask); + spin_lock_irqsave(q->queue_lock, flags); - cfqq = __cfq_get_queue(cfqd, cfq_hash_key(cfqd, current), gfp_mask); - if (!cfqq) - goto out_lock; + if (!cic) + goto queue_fail; -repeat: - if (cfqq->allocated[rw] >= cfqd->max_queued) - goto out_lock; + if (!cic->cfqq) { + cfqq = cfq_get_queue(cfqd, cic->ioc->pid, gfp_mask); + if (!cfqq) + goto queue_fail; + + cic->cfqq = cfqq; + } else + cfqq = cic->cfqq; cfqq->allocated[rw]++; + cfqq->must_alloc = 0; + cfqd->rq_starved = 0; + atomic_inc(&cfqq->ref); spin_unlock_irqrestore(q->queue_lock, flags); - /* - * if hashing type has changed, the cfq_queue might change here. - */ - saved_cfqq = cfqq; - cic = cfq_get_io_context(&cfqq, gfp_mask); - if (!cic) - goto err; - - /* - * repeat allocation checks on queue change - */ - if (unlikely(saved_cfqq != cfqq)) { - spin_lock_irqsave(q->queue_lock, flags); - saved_cfqq->allocated[rw]--; - goto repeat; - } - crq = mempool_alloc(cfqd->crq_pool, gfp_mask); if (crq) { RB_CLEAR(&crq->rb_node); @@ -1452,24 +2071,151 @@ repeat: INIT_HLIST_NODE(&crq->hash); crq->cfq_queue = cfqq; crq->io_context = cic; - crq->service_start = crq->queue_start = 0; - crq->in_flight = crq->accounted = crq->is_sync = 0; - crq->is_write = rw; + crq->in_flight = crq->accounted = 0; + crq->is_sync = (rw == READ || process_sync(current)); + crq->requeued = 0; rq->elevator_private = crq; - cfqq->alloc_limit[rw] = 0; return 0; } - put_io_context(cic->ioc); -err: spin_lock_irqsave(q->queue_lock, flags); cfqq->allocated[rw]--; + if (!(cfqq->allocated[0] + cfqq->allocated[1])) + cfqq->must_alloc = 1; cfq_put_queue(cfqq); -out_lock: +queue_fail: + if (cic) + put_io_context(cic->ioc); + /* + * mark us rq allocation starved. we need to kickstart the process + * ourselves if there are no pending requests that can do it for us. 
+ * that would be an extremely rare OOM situation + */ + cfqd->rq_starved = 1; + if (!cfqd->busy_queues) + kblockd_schedule_work(&cfqd->unplug_work); + spin_unlock_irqrestore(q->queue_lock, flags); return 1; } +static void cfq_kick_queue(void *data) +{ + request_queue_t *q = data; + struct cfq_data *cfqd = q->elevator->elevator_data; + unsigned long flags; + + spin_lock_irqsave(q->queue_lock, flags); + + if (cfqd->rq_starved) { + struct request_list *rl = &q->rq; + + /* + * we aren't guaranteed to get a request after this, but we + * have to be opportunistic + */ + smp_mb(); + if (waitqueue_active(&rl->wait[READ])) + wake_up(&rl->wait[READ]); + if (waitqueue_active(&rl->wait[WRITE])) + wake_up(&rl->wait[WRITE]); + } + + blk_remove_plug(q); + q->request_fn(q); + spin_unlock_irqrestore(q->queue_lock, flags); +} + +/* + * Timer running if the active_queue is currently idling inside its time slice + */ +static void cfq_idle_slice_timer(unsigned long data) +{ + struct cfq_data *cfqd = (struct cfq_data *) data; + struct cfq_queue *cfqq; + unsigned long flags; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + if ((cfqq = cfqd->active_queue) != NULL) { + unsigned long now = jiffies; + + WARN_ON(cfqq->dir == CFQ_ASYNC); + + /* + * expired + */ + if (time_after(now, cfqq->slice_end)) + goto miss; + + /* + * only expire and reinvoke request handler, if there are + * other queues with pending requests + */ + if (!cfqd->busy_queues) { + cfqd->idle_slice_timer.expires = min(now + cfqd->cfq_slice_idle, cfqq->slice_end); + add_timer(&cfqd->idle_slice_timer); + goto out_cont; + } + + /* + * not expired and it has a request pending, let it dispatch + */ + if (!RB_EMPTY(&cfqq->sort_list[cfqq->dir])) { + cfqq->must_dispatch = 1; + goto out_kick; + } else if (!RB_EMPTY(&cfqq->sort_list[CFQ_ASYNC]) && + time_before(now + cfqd->cfq_slice_idle, cfqq->slice_end)) { + /* + * see if we should switch to writes if we have some + * slice left, since the read idled out. 
assign a slice + * end value scale on how much read slice we had left + */ + unsigned long left = cfqq->slice_end - now; + int p, pn; + + p = cfq_prio_to_slice(cfqd, cfqq->ioprio, CFQ_SYNC); + pn = cfq_prio_to_slice(cfqd, cfqq->ioprio, CFQ_ASYNC); + cfqq->slice_end = now + ((pn * left) / p); + cfqq->dir = CFQ_ASYNC; + goto out_kick; + } +miss: + cfqq->idle_miss++; + } + + cfq_slice_expired(cfqd, 0); +out_kick: + if (cfqd->busy_queues) + kblockd_schedule_work(&cfqd->unplug_work); +out_cont: + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + +/* + * Timer running if an idle class queue is waiting for service + */ +static void cfq_idle_class_timer(unsigned long data) +{ + struct cfq_data *cfqd = (struct cfq_data *) data; + unsigned long flags, end; + + spin_lock_irqsave(cfqd->queue->queue_lock, flags); + + /* + * race with a non-idle queue, reset timer + */ + end = cfqd->last_end_request + CFQ_IDLE_GRACE; + if (!time_after_eq(jiffies, end)) { + cfqd->idle_class_timer.expires = end; + add_timer(&cfqd->idle_class_timer); + } else + kblockd_schedule_work(&cfqd->unplug_work); + + spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); +} + + static void cfq_put_cfqd(struct cfq_data *cfqd) { request_queue_t *q = cfqd->queue; @@ -1477,6 +2223,8 @@ static void cfq_put_cfqd(struct cfq_data if (!atomic_dec_and_test(&cfqd->ref)) return; + blk_sync_queue(q); + blk_put_queue(q); mempool_destroy(cfqd->crq_pool); @@ -1487,7 +2235,11 @@ static void cfq_put_cfqd(struct cfq_data static void cfq_exit_queue(elevator_t *e) { - cfq_put_cfqd(e->elevator_data); + struct cfq_data *cfqd = e->elevator_data; + + del_timer_sync(&cfqd->idle_slice_timer); + del_timer_sync(&cfqd->idle_class_timer); + cfq_put_cfqd(cfqd); } static int cfq_init_queue(request_queue_t *q, elevator_t *e) @@ -1500,7 +2252,13 @@ static int cfq_init_queue(request_queue_ return -ENOMEM; memset(cfqd, 0, sizeof(*cfqd)); - INIT_LIST_HEAD(&cfqd->rr_list); + + for (i = 0; i < CFQ_PRIO_LISTS; i++) + INIT_LIST_HEAD(&cfqd->rr_list[i]); + + INIT_LIST_HEAD(&cfqd->busy_rr); + INIT_LIST_HEAD(&cfqd->cur_rr); + INIT_LIST_HEAD(&cfqd->idle_rr); INIT_LIST_HEAD(&cfqd->empty_list); cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL); @@ -1525,15 +2283,19 @@ static int cfq_init_queue(request_queue_ cfqd->queue = q; atomic_inc(&q->refcnt); - /* - * just set it to some high value, we want anyone to be able to queue - * some requests. 
fairness is handled differently - */ - q->nr_requests = 1024; - cfqd->max_queued = q->nr_requests / 16; + cfqd->max_queued = q->nr_requests / 4; q->nr_batching = cfq_queued; - cfqd->key_type = CFQ_KEY_TGID; - cfqd->find_best_crq = 1; + + init_timer(&cfqd->idle_slice_timer); + cfqd->idle_slice_timer.function = cfq_idle_slice_timer; + cfqd->idle_slice_timer.data = (unsigned long) cfqd; + + init_timer(&cfqd->idle_class_timer); + cfqd->idle_class_timer.function = cfq_idle_class_timer; + cfqd->idle_class_timer.data = (unsigned long) cfqd; + + INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q); + atomic_set(&cfqd->ref, 1); cfqd->cfq_queued = cfq_queued; @@ -1543,7 +2305,11 @@ static int cfq_init_queue(request_queue_ cfqd->cfq_fifo_batch_expire = cfq_fifo_rate; cfqd->cfq_back_max = cfq_back_max; cfqd->cfq_back_penalty = cfq_back_penalty; - + cfqd->cfq_slice[0] = cfq_slice_async; + cfqd->cfq_slice[1] = cfq_slice_sync; + cfqd->cfq_slice_async_rq = cfq_slice_async_rq; + cfqd->cfq_slice_idle = cfq_slice_idle; + cfqd->cfq_max_depth = cfq_max_depth; return 0; out_crqpool: kfree(cfqd->cfq_hash); @@ -1587,7 +2353,6 @@ fail: return -ENOMEM; } - /* * sysfs parts below --> */ @@ -1612,45 +2377,6 @@ cfq_var_store(unsigned int *var, const c return count; } -static ssize_t -cfq_clear_elapsed(struct cfq_data *cfqd, const char *page, size_t count) -{ - max_elapsed_dispatch = max_elapsed_crq = 0; - return count; -} - -static ssize_t -cfq_set_key_type(struct cfq_data *cfqd, const char *page, size_t count) -{ - spin_lock_irq(cfqd->queue->queue_lock); - if (!strncmp(page, "pgid", 4)) - cfqd->key_type = CFQ_KEY_PGID; - else if (!strncmp(page, "tgid", 4)) - cfqd->key_type = CFQ_KEY_TGID; - else if (!strncmp(page, "uid", 3)) - cfqd->key_type = CFQ_KEY_UID; - else if (!strncmp(page, "gid", 3)) - cfqd->key_type = CFQ_KEY_GID; - spin_unlock_irq(cfqd->queue->queue_lock); - return count; -} - -static ssize_t -cfq_read_key_type(struct cfq_data *cfqd, char *page) -{ - ssize_t len = 0; - int i; - - for (i = CFQ_KEY_PGID; i < CFQ_KEY_LAST; i++) { - if (cfqd->key_type == i) - len += sprintf(page+len, "[%s] ", cfq_key_types[i]); - else - len += sprintf(page+len, "%s ", cfq_key_types[i]); - } - len += sprintf(page+len, "\n"); - return len; -} - #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ static ssize_t __FUNC(struct cfq_data *cfqd, char *page) \ { \ @@ -1664,9 +2390,13 @@ SHOW_FUNCTION(cfq_queued_show, cfqd->cfq SHOW_FUNCTION(cfq_fifo_expire_r_show, cfqd->cfq_fifo_expire_r, 1); SHOW_FUNCTION(cfq_fifo_expire_w_show, cfqd->cfq_fifo_expire_w, 1); SHOW_FUNCTION(cfq_fifo_batch_expire_show, cfqd->cfq_fifo_batch_expire, 1); -SHOW_FUNCTION(cfq_find_best_show, cfqd->find_best_crq, 0); SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0); SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0); +SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); +SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); +SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); +SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); +SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ @@ -1689,9 +2419,13 @@ STORE_FUNCTION(cfq_queued_store, &cfqd-> STORE_FUNCTION(cfq_fifo_expire_r_store, &cfqd->cfq_fifo_expire_r, 1, UINT_MAX, 1); STORE_FUNCTION(cfq_fifo_expire_w_store, &cfqd->cfq_fifo_expire_w, 1, UINT_MAX, 1); STORE_FUNCTION(cfq_fifo_batch_expire_store, &cfqd->cfq_fifo_batch_expire, 0, UINT_MAX, 1); 
-STORE_FUNCTION(cfq_find_best_store, &cfqd->find_best_crq, 0, 1, 0); STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); +STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); +STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0); #undef STORE_FUNCTION static struct cfq_fs_entry cfq_quantum_entry = { @@ -1719,11 +2453,6 @@ static struct cfq_fs_entry cfq_fifo_batc .show = cfq_fifo_batch_expire_show, .store = cfq_fifo_batch_expire_store, }; -static struct cfq_fs_entry cfq_find_best_entry = { - .attr = {.name = "find_best_crq", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_find_best_show, - .store = cfq_find_best_store, -}; static struct cfq_fs_entry cfq_back_max_entry = { .attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR }, .show = cfq_back_max_show, @@ -1734,14 +2463,30 @@ static struct cfq_fs_entry cfq_back_pena .show = cfq_back_penalty_show, .store = cfq_back_penalty_store, }; -static struct cfq_fs_entry cfq_clear_elapsed_entry = { - .attr = {.name = "clear_elapsed", .mode = S_IWUSR }, - .store = cfq_clear_elapsed, +static struct cfq_fs_entry cfq_slice_sync_entry = { + .attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_sync_show, + .store = cfq_slice_sync_store, +}; +static struct cfq_fs_entry cfq_slice_async_entry = { + .attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_async_show, + .store = cfq_slice_async_store, +}; +static struct cfq_fs_entry cfq_slice_async_rq_entry = { + .attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_async_rq_show, + .store = cfq_slice_async_rq_store, }; -static struct cfq_fs_entry cfq_key_type_entry = { - .attr = {.name = "key_type", .mode = S_IRUGO | S_IWUSR }, - .show = cfq_read_key_type, - .store = cfq_set_key_type, +static struct cfq_fs_entry cfq_slice_idle_entry = { + .attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_slice_idle_show, + .store = cfq_slice_idle_store, +}; +static struct cfq_fs_entry cfq_max_depth_entry = { + .attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR }, + .show = cfq_max_depth_show, + .store = cfq_max_depth_store, }; static struct attribute *default_attrs[] = { @@ -1750,11 +2495,13 @@ static struct attribute *default_attrs[] &cfq_fifo_expire_r_entry.attr, &cfq_fifo_expire_w_entry.attr, &cfq_fifo_batch_expire_entry.attr, - &cfq_key_type_entry.attr, - &cfq_find_best_entry.attr, &cfq_back_max_entry.attr, &cfq_back_penalty_entry.attr, - &cfq_clear_elapsed_entry.attr, + &cfq_slice_sync_entry.attr, + &cfq_slice_async_entry.attr, + &cfq_slice_async_rq_entry.attr, + &cfq_slice_idle_entry.attr, + &cfq_max_depth_entry.attr, NULL, }; @@ -1819,25 +2566,50 @@ static struct elevator_type iosched_cfq .elevator_owner = THIS_MODULE, }; -int cfq_init(void) +static int __init cfq_init(void) { int ret; + /* + * could be 0 on HZ < 1000 setups + */ + if (!cfq_slice_async) + cfq_slice_async = 1; + if (!cfq_slice_idle) + cfq_slice_idle = 1; + if (cfq_slab_setup()) return -ENOMEM; ret = elv_register(&iosched_cfq); - if (!ret) { - __module_get(THIS_MODULE); - return 0; - } + if (ret) + cfq_slab_kill(); - cfq_slab_kill(); return ret; } static void __exit 
cfq_exit(void) { + struct task_struct *g, *p; + unsigned long flags; + + read_lock_irqsave(&tasklist_lock, flags); + + /* + * iterate each process in the system, removing our io_context + */ + do_each_thread(g, p) { + struct io_context *ioc = p->io_context; + + if (ioc && ioc->cic) { + ioc->cic->exit(ioc->cic); + cfq_free_io_context(ioc->cic); + ioc->cic = NULL; + } + } while_each_thread(g, p); + + read_unlock_irqrestore(&tasklist_lock, flags); + cfq_slab_kill(); elv_unregister(&iosched_cfq); } ===== drivers/block/deadline-iosched.c 1.33 vs edited ===== --- 1.33/drivers/block/deadline-iosched.c 2005-01-11 02:29:34 +01:00 +++ edited/drivers/block/deadline-iosched.c 2005-01-12 20:22:18 +01:00 @@ -758,7 +758,8 @@ static void deadline_put_request(request } static int -deadline_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio, + int gfp_mask) { struct deadline_data *dd = q->elevator->elevator_data; struct deadline_rq *drq; ===== drivers/block/elevator.c 1.62 vs edited ===== --- 1.62/drivers/block/elevator.c 2005-01-12 09:03:47 +01:00 +++ edited/drivers/block/elevator.c 2005-01-12 20:22:18 +01:00 @@ -438,12 +438,13 @@ struct request *elv_former_request(reque return NULL; } -int elv_set_request(request_queue_t *q, struct request *rq, int gfp_mask) +int elv_set_request(request_queue_t *q, struct request *rq, struct bio *bio, + int gfp_mask) { elevator_t *e = q->elevator; if (e->ops->elevator_set_req_fn) - return e->ops->elevator_set_req_fn(q, rq, gfp_mask); + return e->ops->elevator_set_req_fn(q, rq, bio, gfp_mask); rq->elevator_private = NULL; return 0; @@ -457,12 +458,12 @@ void elv_put_request(request_queue_t *q, e->ops->elevator_put_req_fn(q, rq); } -int elv_may_queue(request_queue_t *q, int rw) +int elv_may_queue(request_queue_t *q, int rw, struct bio *bio) { elevator_t *e = q->elevator; if (e->ops->elevator_may_queue_fn) - return e->ops->elevator_may_queue_fn(q, rw); + return e->ops->elevator_may_queue_fn(q, rw, bio); return ELV_MQUEUE_MAY; } ===== drivers/block/ll_rw_blk.c 1.281 vs edited ===== --- 1.281/drivers/block/ll_rw_blk.c 2004-12-01 09:13:57 +01:00 +++ edited/drivers/block/ll_rw_blk.c 2005-01-12 20:22:18 +01:00 @@ -1257,11 +1257,7 @@ void __generic_unplug_device(request_que if (!blk_remove_plug(q)) return; - /* - * was plugged, fire request_fn if queue has stuff to do - */ - if (elv_next_request(q)) - q->request_fn(q); + q->request_fn(q); } EXPORT_SYMBOL(__generic_unplug_device); @@ -1438,6 +1434,7 @@ static int blk_init_free_list(request_qu struct request_list *rl = &q->rq; rl->count[READ] = rl->count[WRITE] = 0; + rl->starved[READ] = rl->starved[WRITE] = 0; init_waitqueue_head(&rl->wait[READ]); init_waitqueue_head(&rl->wait[WRITE]); init_waitqueue_head(&rl->drain); @@ -1563,8 +1560,8 @@ static inline void blk_free_request(requ mempool_free(rq, q->rq.rq_pool); } -static inline struct request *blk_alloc_request(request_queue_t *q, int rw, - int gfp_mask) +static inline struct request * +blk_alloc_request(request_queue_t *q, int rw, struct bio *bio, int gfp_mask) { struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); @@ -1577,7 +1574,7 @@ static inline struct request *blk_alloc_ */ rq->flags = rw; - if (!elv_set_request(q, rq, gfp_mask)) + if (!elv_set_request(q, rq, bio, gfp_mask)) return rq; mempool_free(rq, q->rq.rq_pool); @@ -1618,6 +1615,22 @@ void ioc_set_batching(request_queue_t *q ioc->last_waited = jiffies; } +static void __freed_request(request_queue_t *q, int rw) +{ + 
struct request_list *rl = &q->rq; + + if (rl->count[rw] < queue_congestion_off_threshold(q)) + clear_queue_congested(q, rw); + + if (rl->count[rw] + 1 <= q->nr_requests) { + smp_mb(); + if (waitqueue_active(&rl->wait[rw])) + wake_up(&rl->wait[rw]); + + blk_clear_queue_full(q, rw); + } +} + /* * A request has just been released. Account for it, update the full and * congestion status, wake up any waiters. Called under q->queue_lock. @@ -1627,24 +1640,25 @@ static void freed_request(request_queue_ struct request_list *rl = &q->rq; rl->count[rw]--; - if (rl->count[rw] < queue_congestion_off_threshold(q)) - clear_queue_congested(q, rw); - if (rl->count[rw]+1 <= q->nr_requests) { + + __freed_request(q, rw); + + if (unlikely(rl->starved[rw ^ 1])) + __freed_request(q, rw ^ 1); + + if (!rl->count[READ] && !rl->count[WRITE]) { smp_mb(); - if (waitqueue_active(&rl->wait[rw])) - wake_up(&rl->wait[rw]); - blk_clear_queue_full(q, rw); + if (unlikely(waitqueue_active(&rl->drain))) + wake_up(&rl->drain); } - if (unlikely(waitqueue_active(&rl->drain)) && - !rl->count[READ] && !rl->count[WRITE]) - wake_up(&rl->drain); } #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist) /* * Get a free request, queue_lock must not be held */ -static struct request *get_request(request_queue_t *q, int rw, int gfp_mask) +static struct request *get_request(request_queue_t *q, int rw, struct bio *bio, + int gfp_mask) { struct request *rq = NULL; struct request_list *rl = &q->rq; @@ -1667,10 +1681,9 @@ static struct request *get_request(reque } } - switch (elv_may_queue(q, rw)) { + switch (elv_may_queue(q, rw, bio)) { case ELV_MQUEUE_NO: - spin_unlock_irq(q->queue_lock); - goto out; + goto rq_starved; case ELV_MQUEUE_MAY: break; case ELV_MQUEUE_MUST: @@ -1688,11 +1701,12 @@ static struct request *get_request(reque get_rq: rl->count[rw]++; + rl->starved[rw] = 0; if (rl->count[rw] >= queue_congestion_on_threshold(q)) set_queue_congested(q, rw); spin_unlock_irq(q->queue_lock); - rq = blk_alloc_request(q, rw, gfp_mask); + rq = blk_alloc_request(q, rw, bio, gfp_mask); if (!rq) { /* * Allocation failed presumably due to memory. Undo anything @@ -1703,6 +1717,18 @@ get_rq: */ spin_lock_irq(q->queue_lock); freed_request(q, rw); + + /* + * in the very unlikely event that allocation failed and no + * requests for this direction was pending, mark us starved + * so that freeing of a request in the other direction will + * notice us. another possible fix would be to split the + * rq mempool into READ and WRITE + */ +rq_starved: + if (unlikely(rl->count[rw] == 0)) + rl->starved[rw] = 1; + spin_unlock_irq(q->queue_lock); goto out; } @@ -1715,6 +1741,7 @@ get_rq: rq->errors = 0; rq->rq_status = RQ_ACTIVE; rq->bio = rq->biotail = NULL; + rq->ioprio = 0; rq->buffer = NULL; rq->ref_count = 1; rq->q = q; @@ -1724,7 +1751,6 @@ get_rq: rq->data_len = 0; rq->data = NULL; rq->sense = NULL; - out: put_io_context(ioc); return rq; @@ -1734,7 +1760,8 @@ out: * No available requests for this queue, unplug the device and wait for some * requests to become available. 
*/ -static struct request *get_request_wait(request_queue_t *q, int rw) +static struct request *get_request_wait(request_queue_t *q, int rw, + struct bio *bio) { DEFINE_WAIT(wait); struct request *rq; @@ -1746,7 +1773,7 @@ static struct request *get_request_wait( prepare_to_wait_exclusive(&rl->wait[rw], &wait, TASK_UNINTERRUPTIBLE); - rq = get_request(q, rw, GFP_NOIO); + rq = get_request(q, rw, bio, GFP_NOIO); if (!rq) { struct io_context *ioc; @@ -1776,9 +1803,9 @@ struct request *blk_get_request(request_ BUG_ON(rw != READ && rw != WRITE); if (gfp_mask & __GFP_WAIT) - rq = get_request_wait(q, rw); + rq = get_request_wait(q, rw, NULL); else - rq = get_request(q, rw, gfp_mask); + rq = get_request(q, rw, NULL, gfp_mask); return rq; } @@ -2152,7 +2179,6 @@ void __blk_put_request(request_queue_t * return; req->rq_status = RQ_INACTIVE; - req->q = NULL; req->rl = NULL; /* @@ -2262,6 +2288,8 @@ static int attempt_merge(request_queue_t req->rq_disk->in_flight--; } + req->ioprio = ioprio_best(req->ioprio, next->ioprio); + __blk_put_request(q, next); return 1; } @@ -2324,11 +2352,13 @@ static int __make_request(request_queue_ { struct request *req, *freereq = NULL; int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err; + unsigned short prio; sector_t sector; sector = bio->bi_sector; nr_sectors = bio_sectors(bio); cur_nr_sectors = bio_cur_sectors(bio); + prio = bio_prio(bio); rw = bio_data_dir(bio); @@ -2368,6 +2398,7 @@ again: req->biotail->bi_next = bio; req->biotail = bio; req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_back_merge(q, req)) elv_merged_request(q, req); @@ -2392,6 +2423,7 @@ again: req->hard_cur_sectors = cur_nr_sectors; req->sector = req->hard_sector = sector; req->nr_sectors = req->hard_nr_sectors += nr_sectors; + req->ioprio = ioprio_best(req->ioprio, prio); drive_stat_acct(req, nr_sectors, 0); if (!attempt_front_merge(q, req)) elv_merged_request(q, req); @@ -2419,7 +2451,7 @@ get_rq: freereq = NULL; } else { spin_unlock_irq(q->queue_lock); - if ((freereq = get_request(q, rw, GFP_ATOMIC)) == NULL) { + if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) { /* * READA bit set */ @@ -2427,7 +2459,7 @@ get_rq: if (bio_rw_ahead(bio)) goto end_io; - freereq = get_request_wait(q, rw); + freereq = get_request_wait(q, rw, bio); } goto again; } @@ -2455,6 +2487,7 @@ get_rq: req->buffer = bio_data(bio); /* see ->buffer comment above */ req->waiting = NULL; req->bio = req->biotail = bio; + req->ioprio = prio; req->rq_disk = bio->bi_bdev->bd_disk; req->start_time = jiffies; @@ -2502,6 +2535,7 @@ void blk_finish_queue_drain(request_queu { struct request_list *rl = &q->rq; struct request *rq; + int requeued = 0; spin_lock_irq(q->queue_lock); clear_bit(QUEUE_FLAG_DRAIN, &q->queue_flags); @@ -2510,9 +2544,13 @@ void blk_finish_queue_drain(request_queu rq = list_entry_rq(q->drain_list.next); list_del_init(&rq->queuelist); - __elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 1); + elv_requeue_request(q, rq); + requeued++; } + if (requeued) + q->request_fn(q); + spin_unlock_irq(q->queue_lock); wake_up(&rl->wait[0]); @@ -2709,7 +2747,7 @@ void submit_bio(int rw, struct bio *bio) BIO_BUG_ON(!bio->bi_size); BIO_BUG_ON(!bio->bi_io_vec); - bio->bi_rw = rw; + bio->bi_rw |= rw; if (rw & WRITE) mod_page_state(pgpgout, count); else @@ -3042,6 +3080,30 @@ int __init blk_dev_init(void) /* * IO Context helper functions */ +void free_io_context(struct io_context *ioc) +{ + 
kmem_cache_free(iocontext_cachep, ioc); +} +EXPORT_SYMBOL(free_io_context); + +struct io_context *alloc_io_context(int gfp_flags) +{ + struct io_context *ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); + + if (ret) { + atomic_set(&ret->refcount, 1); + ret->task = current; + ret->set_ioprio = NULL; + ret->last_waited = jiffies; /* doesn't matter... */ + ret->nr_batch_requests = 0; /* because this is 0 */ + ret->aic = NULL; + ret->cic = NULL; + } + + return ret; +} +EXPORT_SYMBOL(alloc_io_context); + void put_io_context(struct io_context *ioc) { if (ioc == NULL) @@ -3055,7 +3117,7 @@ void put_io_context(struct io_context *i if (ioc->cic && ioc->cic->dtor) ioc->cic->dtor(ioc->cic); - kmem_cache_free(iocontext_cachep, ioc); + free_io_context(ioc); } } EXPORT_SYMBOL(put_io_context); @@ -3069,6 +3131,7 @@ void exit_io_context(void) local_irq_save(flags); ioc = current->io_context; current->io_context = NULL; + ioc->task = NULL; local_irq_restore(flags); if (ioc->aic && ioc->aic->exit) @@ -3100,25 +3163,18 @@ struct io_context *get_io_context(int gf local_irq_restore(flags); - ret = kmem_cache_alloc(iocontext_cachep, gfp_flags); + ret = alloc_io_context(gfp_flags); if (ret) { - atomic_set(&ret->refcount, 1); - ret->pid = tsk->pid; - ret->last_waited = jiffies; /* doesn't matter... */ - ret->nr_batch_requests = 0; /* because this is 0 */ - ret->aic = NULL; - ret->cic = NULL; - spin_lock_init(&ret->lock); - local_irq_save(flags); /* * very unlikely, someone raced with us in setting up the task * io context. free new context and just grab a reference. */ - if (!tsk->io_context) + if (!tsk->io_context) { tsk->io_context = ret; - else { + ret->pid = tsk->pid; + } else { kmem_cache_free(iocontext_cachep, ret); ret = tsk->io_context; } ===== fs/Makefile 1.68 vs edited ===== --- 1.68/fs/Makefile 2005-01-11 02:29:35 +01:00 +++ edited/fs/Makefile 2005-01-12 20:22:18 +01:00 @@ -10,6 +10,7 @@ obj-y := open.o read_write.o file_table. ioctl.o readdir.o select.o fifo.o locks.o dcache.o inode.o \ attr.o bad_inode.o file.o filesystems.o namespace.o aio.o \ seq_file.o xattr.o libfs.o fs-writeback.o mpage.o direct-io.o \ + ioprio.o obj-$(CONFIG_EPOLL) += eventpoll.o obj-$(CONFIG_COMPAT) += compat.o ===== fs/buffer.c 1.269 vs edited ===== --- 1.269/fs/buffer.c 2005-01-12 01:43:13 +01:00 +++ edited/fs/buffer.c 2005-01-12 20:22:18 +01:00 @@ -874,6 +874,7 @@ int __set_page_dirty_buffers(struct page spin_unlock(&mapping->private_lock); if (!TestSetPageDirty(page)) { + set_page_dirty_pid(page); spin_lock_irq(&mapping->tree_lock); if (page->mapping) { /* Race with truncate? 
*/ if (!mapping->backing_dev_info->memory_backed) ===== fs/reiserfs/journal.c 1.99 vs edited ===== --- 1.99/fs/reiserfs/journal.c 2005-01-08 06:44:20 +01:00 +++ edited/fs/reiserfs/journal.c 2005-01-12 20:22:18 +01:00 @@ -646,18 +646,22 @@ struct buffer_chunk { static void write_chunk(struct buffer_chunk *chunk) { int i; + get_fs_excl(); for (i = 0; i < chunk->nr ; i++) { submit_logged_buffer(chunk->bh[i]) ; } chunk->nr = 0; + put_fs_excl(); } static void write_ordered_chunk(struct buffer_chunk *chunk) { int i; + get_fs_excl(); for (i = 0; i < chunk->nr ; i++) { submit_ordered_buffer(chunk->bh[i]) ; } chunk->nr = 0; + put_fs_excl(); } static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, @@ -919,6 +923,8 @@ static int flush_commit_list(struct supe return 0 ; } + get_fs_excl(); + /* before we can put our commit blocks on disk, we have to make sure everyone older than ** us is on disk too */ @@ -1056,6 +1062,7 @@ put_jl: if (retval) reiserfs_abort (s, retval, "Journal write error in %s", __FUNCTION__); + put_fs_excl(); return retval; } @@ -1252,6 +1259,8 @@ static int flush_journal_list(struct sup return 0 ; } + get_fs_excl(); + /* if all the work is already done, get out of here */ if (atomic_read(&(jl->j_nonzerolen)) <= 0 && atomic_read(&(jl->j_commit_left)) <= 0) { @@ -1451,6 +1460,7 @@ flush_older_and_return: put_journal_list(s, jl); if (flushall) up(&journal->j_flush_sem); + put_fs_excl(); return err ; } @@ -2720,6 +2730,7 @@ relock: th->t_trans_id = journal->j_trans_id ; unlock_journal(p_s_sb) ; INIT_LIST_HEAD (&th->t_list); + get_fs_excl(); return 0 ; out_fail: @@ -3523,6 +3534,7 @@ static int do_journal_end(struct reiserf BUG_ON (th->t_refcount > 1); BUG_ON (!th->t_trans_id); + put_fs_excl(); current->journal_info = th->t_handle_save; reiserfs_check_lock_depth(p_s_sb, "journal end"); if (journal->j_len == 0) { ===== include/asm-i386/unistd.h 1.43 vs edited ===== --- 1.43/include/asm-i386/unistd.h 2004-10-24 12:32:46 +02:00 +++ edited/include/asm-i386/unistd.h 2005-01-12 20:22:18 +01:00 @@ -294,8 +294,10 @@ #define __NR_add_key 286 #define __NR_request_key 287 #define __NR_keyctl 288 +#define __NR_ioprio_set 289 +#define __NR_ioprio_get 290 -#define NR_syscalls 289 +#define NR_syscalls 291 /* * user-visible error numbers are in the range -1 - -128: see ===== include/asm-ia64/unistd.h 1.54 vs edited ===== --- 1.54/include/asm-ia64/unistd.h 2005-01-05 03:48:14 +01:00 +++ edited/include/asm-ia64/unistd.h 2005-01-12 20:22:18 +01:00 @@ -263,6 +263,8 @@ #define __NR_add_key 1271 #define __NR_request_key 1272 #define __NR_keyctl 1273 +#define __NR_ioprio_set 1274 +#define __NR_ioprio_get 1275 #ifdef __KERNEL__ ===== include/asm-ppc/unistd.h 1.35 vs edited ===== --- 1.35/include/asm-ppc/unistd.h 2005-01-04 00:49:20 +01:00 +++ edited/include/asm-ppc/unistd.h 2005-01-12 20:22:18 +01:00 @@ -276,8 +276,10 @@ #define __NR_add_key 269 #define __NR_request_key 270 #define __NR_keyctl 271 +#define __NR_ioprio_set 272 +#define __NR_ioprio_get 273 -#define __NR_syscalls 272 +#define __NR_syscalls 274 #define __NR(n) #n ===== include/asm-x86_64/unistd.h 1.34 vs edited ===== --- 1.34/include/asm-x86_64/unistd.h 2005-01-12 01:42:45 +01:00 +++ edited/include/asm-x86_64/unistd.h 2005-01-12 20:22:18 +01:00 @@ -562,8 +562,12 @@ __SYSCALL(__NR_add_key, sys_add_key) __SYSCALL(__NR_request_key, sys_request_key) #define __NR_keyctl 250 __SYSCALL(__NR_keyctl, sys_keyctl) +#define __NR_ioprio_set 251 +__SYSCALL(__NR_ioprio_set, sys_ioprio_set) +#define __NR_ioprio_get 252 
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get) -#define __NR_syscall_max __NR_keyctl +#define __NR_syscall_max __NR_ioprio_get #ifndef __NO_STUBS /* user-visible error numbers are in the range -1 - -4095 */ ===== include/linux/bio.h 1.43 vs edited ===== --- 1.43/include/linux/bio.h 2004-11-06 12:03:05 +01:00 +++ edited/include/linux/bio.h 2005-01-12 20:22:18 +01:00 @@ -22,6 +22,7 @@ #include #include +#include /* Platforms may set this to teach the BIO layer about IOMMU hardware. */ #include @@ -148,6 +149,19 @@ struct bio { #define BIO_RW_SYNC 4 /* + * upper 16 bits of bi_rw define the io priority of this bio + */ +#define BIO_PRIO_SHIFT (8 * sizeof(unsigned long) - IOPRIO_BITS) +#define bio_prio(bio) ((bio)->bi_rw >> BIO_PRIO_SHIFT) +#define bio_prio_valid(bio) ioprio_valid(bio_prio(bio)) + +#define bio_set_prio(bio, prio) do { \ + WARN_ON(prio >= (1 << IOPRIO_BITS)); \ + (bio)->bi_rw &= ((1UL << BIO_PRIO_SHIFT) - 1); \ + (bio)->bi_rw |= ((unsigned long) (prio) << BIO_PRIO_SHIFT); \ +} while (0) + +/* * various member access, note that bio_data should of course not be used * on highmem page vectors */ @@ -328,5 +342,17 @@ extern inline char *__bio_kmap_irq(struc #define bio_kmap_irq(bio, flags) \ __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) #define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) + +/* + * slight risk of collision if several processes are dirtying data in the + * same area on disk + */ +static inline int bio_dirty_pid(struct bio *bio) +{ + if (bio && bio->bi_vcnt) + return get_page_dirty_pid(bio_iovec_idx(bio, 0)->bv_page); + + return 0; +} #endif /* __LINUX_BIO_H */ ===== include/linux/blkdev.h 1.157 vs edited ===== --- 1.157/include/linux/blkdev.h 2004-11-11 09:39:16 +01:00 +++ edited/include/linux/blkdev.h 2005-01-12 20:22:18 +01:00 @@ -54,16 +54,24 @@ struct as_io_context { struct cfq_queue; struct cfq_io_context { - void (*dtor)(struct cfq_io_context *); - void (*exit)(struct cfq_io_context *); - - struct io_context *ioc; - /* * circular list of cfq_io_contexts belonging to a process io context */ struct list_head list; + struct rcu_head rcu; struct cfq_queue *cfqq; + void *key; + + struct io_context *ioc; + + unsigned long last_end_request; + unsigned long last_queue; + unsigned long ttime_total; + unsigned long ttime_samples; + unsigned long ttime_mean; + + void (*dtor)(struct cfq_io_context *); + void (*exit)(struct cfq_io_context *); }; /* @@ -74,6 +82,9 @@ struct cfq_io_context { struct io_context { atomic_t refcount; pid_t pid; + struct task_struct *task; + + int (*set_ioprio)(struct io_context *, unsigned int); /* * For request batching @@ -81,8 +92,6 @@ struct io_context { unsigned long last_waited; /* Time last woken after wait for request */ int nr_batch_requests; /* Number of requests left in the batch */ - spinlock_t lock; - struct as_io_context *aic; struct cfq_io_context *cic; }; @@ -90,11 +99,14 @@ struct io_context { void put_io_context(struct io_context *ioc); void exit_io_context(void); struct io_context *get_io_context(int gfp_flags); +struct io_context *alloc_io_context(int gfp_flags); +void free_io_context(struct io_context *ioc); void copy_io_context(struct io_context **pdst, struct io_context **psrc); void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); struct request_list { int count[2]; + int starved[2]; mempool_t *rq_pool; wait_queue_head_t wait[2]; wait_queue_head_t drain; @@ -129,6 +141,8 @@ struct request { struct bio *biotail; void *elevator_private; + + unsigned short ioprio; int rq_status; /* should split this 
into a few status bits */ struct gendisk *rq_disk; ===== include/linux/elevator.h 1.33 vs edited ===== --- 1.33/include/linux/elevator.h 2004-10-19 11:40:18 +02:00 +++ edited/include/linux/elevator.h 2005-01-12 20:22:18 +01:00 @@ -16,9 +16,9 @@ typedef void (elevator_remove_req_fn) (r typedef void (elevator_requeue_req_fn) (request_queue_t *, struct request *); typedef struct request *(elevator_request_list_fn) (request_queue_t *, struct request *); typedef void (elevator_completed_req_fn) (request_queue_t *, struct request *); -typedef int (elevator_may_queue_fn) (request_queue_t *, int); +typedef int (elevator_may_queue_fn) (request_queue_t *, int, struct bio *); -typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, int); +typedef int (elevator_set_req_fn) (request_queue_t *, struct request *, struct bio *, int); typedef void (elevator_put_req_fn) (request_queue_t *, struct request *); typedef int (elevator_init_fn) (request_queue_t *, elevator_t *); @@ -93,9 +93,9 @@ extern struct request *elv_former_reques extern struct request *elv_latter_request(request_queue_t *, struct request *); extern int elv_register_queue(request_queue_t *q); extern void elv_unregister_queue(request_queue_t *q); -extern int elv_may_queue(request_queue_t *, int); +extern int elv_may_queue(request_queue_t *, int, struct bio *); extern void elv_completed_request(request_queue_t *, struct request *); -extern int elv_set_request(request_queue_t *, struct request *, int); +extern int elv_set_request(request_queue_t *, struct request *, struct bio *, int); extern void elv_put_request(request_queue_t *, struct request *); /* ===== include/linux/fs.h 1.371 vs edited ===== --- 1.371/include/linux/fs.h 2005-01-12 01:43:13 +01:00 +++ edited/include/linux/fs.h 2005-01-12 20:22:18 +01:00 @@ -18,6 +18,7 @@ #include #include #include +#include #include struct iovec; @@ -816,16 +817,34 @@ enum { #define vfs_check_frozen(sb, level) \ wait_event((sb)->s_wait_unfrozen, ((sb)->s_frozen < (level))) +static inline void get_fs_excl(void) +{ + atomic_inc(¤t->fs_excl); +} + +static inline void put_fs_excl(void) +{ + atomic_dec(¤t->fs_excl); +} + +static inline int has_fs_excl(void) +{ + return atomic_read(¤t->fs_excl); +} + + /* * Superblock locking. */ static inline void lock_super(struct super_block * sb) { + get_fs_excl(); down(&sb->s_lock); } static inline void unlock_super(struct super_block * sb) { + put_fs_excl(); up(&sb->s_lock); } ===== include/linux/init_task.h 1.33 vs edited ===== --- 1.33/include/linux/init_task.h 2005-01-05 03:48:20 +01:00 +++ edited/include/linux/init_task.h 2005-01-12 20:22:18 +01:00 @@ -80,6 +80,7 @@ extern struct group_info init_groups; .mm = NULL, \ .active_mm = &init_mm, \ .run_list = LIST_HEAD_INIT(tsk.run_list), \ + .ioprio = 0, \ .time_slice = HZ, \ .tasks = LIST_HEAD_INIT(tsk.tasks), \ .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ @@ -112,6 +113,7 @@ extern struct group_info init_groups; .proc_lock = SPIN_LOCK_UNLOCKED, \ .switch_lock = SPIN_LOCK_UNLOCKED, \ .journal_info = NULL, \ + .fs_excl = ATOMIC_INIT(0), \ } ===== include/linux/mm.h 1.211 vs edited ===== --- 1.211/include/linux/mm.h 2005-01-11 02:29:23 +01:00 +++ edited/include/linux/mm.h 2005-01-12 20:22:18 +01:00 @@ -260,6 +260,12 @@ struct page { void *virtual; /* Kernel virtual address (NULL if not kmapped, ie. 
highmem) */ #endif /* WANT_PAGE_VIRTUAL */ + /* + * Keep track of who dirtied this page + */ +#if defined(CONFIG_IOPRIO_WRITE) + pid_t dirty_pid; +#endif }; /* @@ -853,6 +859,22 @@ int in_gate_area(struct task_struct *tas int in_gate_area_no_task(unsigned long addr); #define in_gate_area(task, addr) ({(void)task; in_gate_area_no_task(addr);}) #endif /* __HAVE_ARCH_GATE_AREA */ + +#if defined(CONFIG_IOPRIO_WRITE) +#define get_page_dirty_pid(page) ((page)->dirty_pid) +#define set_page_dirty_pid(page) \ + do { \ + (page)->dirty_pid = current->pid; \ + } while (0) +#define clear_page_dirty_pid(page) \ + do { \ + (page)->dirty_pid = 0; \ + } while (0) +#else +#define set_page_dirty_pid(page) do { } while (0) +#define get_page_dirty_pid(page) (0) +#define clear_page_dirty_pid(page) do { } while (0) +#endif #endif /* __KERNEL__ */ #endif /* _LINUX_MM_H */ ===== include/linux/sched.h 1.291 vs edited ===== --- 1.291/include/linux/sched.h 2005-01-12 01:42:57 +01:00 +++ edited/include/linux/sched.h 2005-01-12 20:22:18 +01:00 @@ -538,6 +538,8 @@ struct task_struct { struct list_head run_list; prio_array_t *array; + unsigned short ioprio; + unsigned long sleep_avg; unsigned long long timestamp, last_ran; int activated; @@ -685,6 +687,7 @@ struct task_struct { struct mempolicy *mempolicy; short il_next; /* could be shared with used_math */ #endif + atomic_t fs_excl; /* holding fs exclusive resources */ }; static inline pid_t process_group(struct task_struct *tsk) ===== include/linux/writeback.h 1.32 vs edited ===== --- 1.32/include/linux/writeback.h 2004-11-11 09:25:52 +01:00 +++ edited/include/linux/writeback.h 2005-01-12 20:22:18 +01:00 @@ -14,10 +14,12 @@ extern struct list_head inode_unused; * Yes, writeback.h requires sched.h * No, sched.h is not included from here. 
*/ -static inline int current_is_pdflush(void) +static inline int task_is_pdflush(struct task_struct *task) { - return current->flags & PF_FLUSHER; + return task->flags & PF_FLUSHER; } + +#define current_is_pdflush() task_is_pdflush(current) /* * fs/fs-writeback.c ===== kernel/exit.c 1.181 vs edited ===== --- 1.181/kernel/exit.c 2005-01-12 01:42:35 +01:00 +++ edited/kernel/exit.c 2005-01-12 20:22:18 +01:00 @@ -782,6 +782,8 @@ fastcall NORET_TYPE void do_exit(long co profile_task_exit(tsk); + WARN_ON(atomic_read(&tsk->fs_excl)); + if (unlikely(in_interrupt())) panic("Aiee, killing interrupt handler!"); if (unlikely(!tsk->pid)) ===== kernel/fork.c 1.232 vs edited ===== --- 1.232/kernel/fork.c 2005-01-12 01:42:35 +01:00 +++ edited/kernel/fork.c 2005-01-12 20:22:18 +01:00 @@ -1021,6 +1021,11 @@ static task_t *copy_process(unsigned lon spin_unlock(¤t->sighand->siglock); } + /* + * inherit ioprio + */ + p->ioprio = current->ioprio; + SET_LINKS(p); if (unlikely(p->ptrace & PT_PTRACED)) __ptrace_link(p, current->parent); ===== mm/filemap.c 1.288 vs edited ===== --- 1.288/mm/filemap.c 2005-01-08 06:44:08 +01:00 +++ edited/mm/filemap.c 2005-01-12 20:22:18 +01:00 @@ -443,6 +443,7 @@ void end_page_writeback(struct page *pag BUG(); } smp_mb__after_clear_bit(); + clear_page_dirty_pid(page); wake_up_page(page, PG_writeback); } EXPORT_SYMBOL(end_page_writeback); ===== mm/page-writeback.c 1.95 vs edited ===== --- 1.95/mm/page-writeback.c 2004-10-21 10:39:27 +02:00 +++ edited/mm/page-writeback.c 2005-01-12 20:22:18 +01:00 @@ -587,6 +587,8 @@ int __set_page_dirty_nobuffers(struct pa struct address_space *mapping = page_mapping(page); struct address_space *mapping2; + set_page_dirty_pid(page); + if (mapping) { spin_lock_irq(&mapping->tree_lock); mapping2 = page_mapping(page); @@ -628,6 +630,8 @@ EXPORT_SYMBOL(redirty_page_for_writepage int fastcall set_page_dirty(struct page *page) { struct address_space *mapping = page_mapping(page); + + set_page_dirty_pid(page); if (likely(mapping)) { int (*spd)(struct page *) = mapping->a_ops->set_page_dirty; ===== mm/page_alloc.c 1.254 vs edited ===== --- 1.254/mm/page_alloc.c 2005-01-11 02:29:33 +01:00 +++ edited/mm/page_alloc.c 2005-01-12 20:22:18 +01:00 @@ -1502,6 +1502,7 @@ void __init memmap_init_zone(unsigned lo reset_page_mapcount(page); SetPageReserved(page); INIT_LIST_HEAD(&page->lru); + clear_page_dirty_pid(page); #ifdef WANT_PAGE_VIRTUAL /* The shift won't overflow because ZONE_NORMAL is below 4G. 
*/ if (!is_highmem_idx(zone)) ===== mm/vmscan.c 1.237 vs edited ===== --- 1.237/mm/vmscan.c 2005-01-08 06:44:01 +01:00 +++ edited/mm/vmscan.c 2005-01-12 20:22:18 +01:00 @@ -1161,6 +1161,14 @@ static int kswapd(void *p) */ tsk->flags |= PF_MEMALLOC|PF_KSWAPD; +#if 0 + /* highest prio, RT class */ + tsk->ioprio = 1 << 13; +#else + /* highest best-effort priority */ + tsk->ioprio = (2 << 13) | 0; +#endif + order = 0; for ( ; ; ) { unsigned long new_order; --- /dev/null 2004-10-02 05:17:35.000000000 +0200 +++ linux-2.6/fs/ioprio.c 2004-12-31 15:42:14.000000000 +0100 @@ -0,0 +1,155 @@ +/* + * fs/ioprio.c + * + * Copyright (C) 2004 Jens Axboe + * + * Helper functions for setting/querying io priorities of processes + */ +#include +#include +#include + +static int set_task_ioprio(struct task_struct *task, int ioprio) +{ + struct io_context *ioc; + + if (task->uid != current->euid && + task->uid != current->uid && !capable(CAP_SYS_NICE)) + return -EPERM; + + task->ioprio = ioprio; + + ioc = task->io_context; + if (ioc && ioc->set_ioprio) + ioc->set_ioprio(ioc, ioprio); + + return 0; +} + +asmlinkage int sys_ioprio_set(int which, int who, int ioprio) +{ + int class = IOPRIO_PRIO_CLASS(ioprio); + int data = IOPRIO_PRIO_DATA(ioprio); + struct task_struct *p, *g; + struct user_struct *user; + int ret; + + switch (class) { + case IOPRIO_CLASS_RT: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + /* fall through, rt has prio field too */ + case IOPRIO_CLASS_BE: + if (data >= IOPRIO_BE_NR || data < 0) + return -EINVAL; + + break; + case IOPRIO_CLASS_IDLE: + break; + default: + return -EINVAL; + } + + ret = -ESRCH; + read_lock_irq(&tasklist_lock); + switch (which) { + case IOPRIO_WHO_PROCESS: + if (!who) + p = current; + else + p = find_task_by_pid(who); + if (p) + ret = set_task_ioprio(p, ioprio); + break; + case IOPRIO_WHO_PGRP: + if (!who) + who = process_group(current); + do_each_task_pid(who, PIDTYPE_PGID, p) { + ret = set_task_ioprio(p, ioprio); + if (ret) + break; + } while_each_task_pid(who, PIDTYPE_PGID, p); + break; + case IOPRIO_WHO_USER: + if (!who) + user = current->user; + else + user = find_user(who); + + if (!user) + break; + + do_each_thread(g, p) { + if (p->uid != who) + continue; + ret = set_task_ioprio(p, ioprio); + if (ret) + break; + } while_each_thread(g, p); + + if (who) + free_uid(user); + break; + default: + ret = -EINVAL; + } + + read_unlock_irq(&tasklist_lock); + return ret; +} + +asmlinkage int sys_ioprio_get(int which, int who) +{ + struct task_struct *g, *p; + struct user_struct *user; + int ret = -ESRCH; + + read_lock_irq(&tasklist_lock); + switch (which) { + case IOPRIO_WHO_PROCESS: + if (!who) + p = current; + else + p = find_task_by_pid(who); + if (p) + ret = p->ioprio; + break; + case IOPRIO_WHO_PGRP: + if (!who) + who = process_group(current); + do_each_task_pid(who, PIDTYPE_PGID, p) { + if (ret == -ESRCH) + ret = p->ioprio; + else + ret = ioprio_best(ret, p->ioprio); + } while_each_task_pid(who, PIDTYPE_PGID, p); + break; + case IOPRIO_WHO_USER: + if (!who) + user = current->user; + else + user = find_user(who); + + if (!user) + break; + + do_each_thread(g, p) { + if (p->uid != user->uid) + continue; + if (ret == -ESRCH) + ret = p->ioprio; + else + ret = ioprio_best(ret, p->ioprio); + } while_each_thread(g, p); + + if (who) + free_uid(user); + break; + default: + ret = -EINVAL; + } + + read_unlock_irq(&tasklist_lock); + return ret; +} + --- /dev/null 2004-10-02 05:17:35.000000000 +0200 +++ linux-2.6/include/linux/ioprio.h 2005-01-04 09:08:11.000000000 +0100 @@ -0,0 
+1,81 @@ +#ifndef IOPRIO_H +#define IOPRIO_H + +#include + +/* + * Gives us 8 prio classes with 13-bits of data for each class + */ +#define IOPRIO_BITS (16) +#define IOPRIO_CLASS_SHIFT (13) +#define IOPRIO_PRIO_MASK ((1UL << IOPRIO_CLASS_SHIFT) - 1) + +#define IOPRIO_PRIO_CLASS(mask) ((mask) >> IOPRIO_CLASS_SHIFT) +#define IOPRIO_PRIO_DATA(mask) ((mask) & IOPRIO_PRIO_MASK) + +#define ioprio_valid(mask) (IOPRIO_PRIO_CLASS((mask)) != IOPRIO_CLASS_NONE) + +enum { + IOPRIO_CLASS_NONE, + IOPRIO_CLASS_RT, + IOPRIO_CLASS_BE, + IOPRIO_CLASS_IDLE, +}; + +/* + * 8 best effort priority levels are supported + */ +#define IOPRIO_BE_NR (8) + +asmlinkage int sys_ioprio_set(int, int, int); +asmlinkage int sys_ioprio_get(int, int); + +enum { + IOPRIO_WHO_PROCESS = 1, + IOPRIO_WHO_PGRP, + IOPRIO_WHO_USER, +}; + +/* + * if process has set io priority explicitly, use that. if not, convert + * the cpu scheduler nice value to an io priority + */ +#define IOPRIO_NORM (4) +static inline int task_ioprio(struct task_struct *task) +{ + WARN_ON(!ioprio_valid(task->ioprio)); + return IOPRIO_PRIO_DATA(task->ioprio); +} + +static inline int task_nice_ioprio(struct task_struct *task) +{ + return (task_nice(task) + 20) / 5; +} + +/* + * For inheritance, return the highest of the two given priorities + */ +static inline int ioprio_best(unsigned short aprio, unsigned short bprio) +{ + unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); + unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); + + if (!ioprio_valid(aprio)) + return bprio; + if (!ioprio_valid(bprio)) + return aprio; + + if (aclass == IOPRIO_CLASS_NONE) + aclass = IOPRIO_CLASS_BE; + if (bclass == IOPRIO_CLASS_NONE) + bclass = IOPRIO_CLASS_BE; + + if (aclass == bclass) + return min(aprio, bprio); + if (aclass > bclass) + return bprio; + else + return aprio; +} + +#endif
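
A quick way to exercise the new interface from userspace, appended here for reference: a minimal test program along the lines below. This is only a sketch -- glibc has no wrappers for the new calls yet, the syscall numbers are the i386 ones added by this patch (289/290; other architectures use the numbers from their own unistd.h), and the constants are copied from the new include/linux/ioprio.h.

/*
 * ioprio-test.c - minimal sketch for trying out ioprio_set/ioprio_get.
 * Assumes the i386 syscall numbers from this patch; adjust for other
 * architectures. No glibc wrappers exist, so syscall(2) is used directly.
 */
#include <stdio.h>
#include <unistd.h>

#ifndef __NR_ioprio_set
#define __NR_ioprio_set		289	/* i386 */
#define __NR_ioprio_get		290
#endif

/* copied from the new include/linux/ioprio.h */
#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_CLASS_BE		2
#define IOPRIO_WHO_PROCESS	1

int main(int argc, char *argv[])
{
	/* best-effort class, level 7 (lowest of the 8 BE levels) */
	int ioprio = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 7;

	/* who == 0 means the calling process */
	if (syscall(__NR_ioprio_set, IOPRIO_WHO_PROCESS, 0, ioprio) == -1) {
		perror("ioprio_set");
		return 1;
	}

	ioprio = syscall(__NR_ioprio_get, IOPRIO_WHO_PROCESS, 0);
	if (ioprio == -1) {
		perror("ioprio_get");
		return 1;
	}
	printf("ioprio: class %d, data %d\n",
	       ioprio >> IOPRIO_CLASS_SHIFT,
	       ioprio & ((1 << IOPRIO_CLASS_SHIFT) - 1));

	/* ->ioprio is inherited over fork, so a child command keeps it */
	if (argc > 1)
		execvp(argv[1], &argv[1]);

	return 0;
}

Started with a command appended (a big dd or tar, say), the child then does its io at the lowered best-effort priority, which is the simplest way to see the per-queue slicing in CFQ do something visible.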
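
For reference, the priority word that now travels in bio->bi_rw and request->ioprio, and the merge rule used when requests from different contexts are combined, can be checked in isolation. The sketch below simply re-creates the arithmetic from the new include/linux/ioprio.h in userspace; the macro and enum values are copied from the patch, while best() is a simplified restatement of ioprio_best() and the sample values in main() are made up.

/*
 * Standalone illustration of the ioprio encoding: 3 class bits above
 * 13 bits of per-class data. Values copied from include/linux/ioprio.h.
 */
#include <stdio.h>

#define IOPRIO_CLASS_SHIFT	13
#define IOPRIO_PRIO_MASK	((1UL << IOPRIO_CLASS_SHIFT) - 1)
#define IOPRIO_PRIO_CLASS(mask)	((mask) >> IOPRIO_CLASS_SHIFT)
#define IOPRIO_PRIO_DATA(mask)	((mask) & IOPRIO_PRIO_MASK)
#define ioprio_valid(mask)	(IOPRIO_PRIO_CLASS(mask) != IOPRIO_CLASS_NONE)

enum {
	IOPRIO_CLASS_NONE,
	IOPRIO_CLASS_RT,
	IOPRIO_CLASS_BE,
	IOPRIO_CLASS_IDLE,
};

/*
 * same ordering as ioprio_best(): an unset priority loses, otherwise the
 * stronger (numerically lower) class wins, and within a class the lower
 * data value wins
 */
static unsigned short best(unsigned short aprio, unsigned short bprio)
{
	unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
	unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);

	if (!ioprio_valid(aprio))
		return bprio;
	if (!ioprio_valid(bprio))
		return aprio;
	if (aclass == bclass)
		return aprio < bprio ? aprio : bprio;

	return aclass < bclass ? aprio : bprio;
}

int main(void)
{
	unsigned short rt1 = (IOPRIO_CLASS_RT << IOPRIO_CLASS_SHIFT) | 1;
	unsigned short be4 = (IOPRIO_CLASS_BE << IOPRIO_CLASS_SHIFT) | 4;
	unsigned short m = best(rt1, be4);

	/* a request merged from these two priorities keeps RT/1 */
	printf("merged: class %d, data %d\n",
	       (int) IOPRIO_PRIO_CLASS(m), (int) IOPRIO_PRIO_DATA(m));
	return 0;
}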
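
The tunables registered in default_attrs above show up under the queue's iosched directory in sysfs once CFQ is the active scheduler. A rough sketch that dumps them follows; the /sys/block/hda path is an assumption (substitute the disk being tested), and only attribute names visible in this patch are listed.

/*
 * Dump the CFQ tunables added/kept by this patch for one queue.
 * Assumes sysfs is mounted at /sys and that "hda" is using CFQ.
 */
#include <stdio.h>

int main(void)
{
	static const char *names[] = {
		"slice_sync", "slice_async", "slice_async_rq",
		"slice_idle", "max_depth",
		"back_seek_max", "back_seek_penalty",
	};
	char path[256], buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/block/hda/queue/iosched/%s", names[i]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("%-18s %s", names[i], buf);
		fclose(f);
	}
	return 0;
}

Writing to the same files changes the values at runtime, within the bounds given to STORE_FUNCTION above.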