/*
* CFQ, or complete fairness queueing, disk scheduler.
*
* Based on ideas from a previously unfinished io
* scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
*
* Copyright (C) 2003 Jens Axboe <axboe@suse.de>
*/
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/bio.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/mempool.h>
#include <linux/ioprio.h>
#include <linux/writeback.h>
static const int cfq_quantum = 4; /* max queue in one round of service */
static const int cfq_queued = 8; /* minimum rq allocate limit per-queue*/
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2; /* penalty of a backwards seek */
static const int cfq_slice_sync = HZ / 10;
static const int cfq_slice_async_rq = 2;
#define CFQ_IDLE_GRACE (HZ / 10)
#define CFQ_SLICE_SCALE (5)
#define CFQ_KEY_ASYNC (0)
/*
* disable queueing at the driver/hardware level
*/
static const int cfq_max_depth = 2;
static DEFINE_RWLOCK(cfq_exit_lock);
/*
* for the hash of cfqq inside the cfqd
*/
#define CFQ_QHASH_SHIFT 6
#define CFQ_QHASH_ENTRIES (1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry) hlist_entry((entry), struct cfq_queue, cfq_hash)
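/*
* cfq_hash holds the per-process cfq_queues in 1 << CFQ_QHASH_SHIFT (64)
* buckets; the lookup key is normally the issuing task's pid (see e.g.
* cfq_find_cfq_hash(cfqd, current->pid, ...) in cfq_find_rq_rb() below).
*/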
/*
* for the hash of crq inside the cfqq
*/
#define CFQ_MHASH_SHIFT 6
#define CFQ_MHASH_BLOCK(sec) ((sec) >> 3)
#define CFQ_MHASH_ENTRIES (1 << CFQ_MHASH_SHIFT)
#define CFQ_MHASH_FN(sec) hash_long(CFQ_MHASH_BLOCK(sec), CFQ_MHASH_SHIFT)
#define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors)
#define list_entry_hash(ptr) hlist_entry((ptr), struct cfq_rq, hash)
#define list_entry_cfqq(ptr) list_entry((ptr), struct cfq_queue, cfq_list)
#define list_entry_fifo(ptr) list_entry((ptr), struct request, queuelist)
#define RQ_DATA(rq) (rq)->elevator_private
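/*
* The crq hash indexes requests by the sector just past their last one
* (rq_hash_key), so a back merge candidate for a bio can be found by
* looking up bio->bi_sector directly (see cfq_merge() below).
*/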
/*
* rb-tree defines
*/
#define RB_NONE (2)
#define RB_EMPTY(node) ((node)->rb_node == NULL)
#define RB_CLEAR_COLOR(node) (node)->rb_color = RB_NONE
#define RB_CLEAR(node) do { \
(node)->rb_parent = NULL; \
RB_CLEAR_COLOR((node)); \
(node)->rb_right = NULL; \
(node)->rb_left = NULL; \
} while (0)
#define RB_CLEAR_ROOT(root) ((root)->rb_node = NULL)
#define rb_entry_crq(node) rb_entry((node), struct cfq_rq, rb_node)
#define rq_rb_key(rq) (rq)->sector
static kmem_cache_t *crq_pool;
static kmem_cache_t *cfq_pool;
static kmem_cache_t *cfq_ioc_pool;
#define CFQ_PRIO_LISTS IOPRIO_BE_NR
#define cfq_class_idle(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_be(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
#define cfq_class_rt(cfqq) ((cfqq)->ioprio_class == IOPRIO_CLASS_RT)
#define ASYNC (0)
#define SYNC (1)
#define cfq_cfqq_dispatched(cfqq) \
((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])
#define cfq_cfqq_class_sync(cfqq) ((cfqq)->key != CFQ_KEY_ASYNC)
#define cfq_cfqq_sync(cfqq) \
(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])
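/*
* Roughly: a queue keyed CFQ_KEY_ASYNC collects async io (typically
* buffered write-back), while any other key identifies a per-process
* sync queue. cfq_cfqq_sync() also treats a queue as sync while it still
* has sync requests on the dispatch list.
*/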
/*
* Per block device queue structure
*/
struct cfq_data {
atomic_t ref;
request_queue_t *queue;
/*
* rr list of queues with requests and the count of them
*/
struct list_head rr_list[CFQ_PRIO_LISTS];
struct list_head busy_rr;
struct list_head cur_rr;
struct list_head idle_rr;
unsigned int busy_queues;
/*
* non-ordered list of empty cfqq's
*/
struct list_head empty_list;
/*
* cfqq lookup hash
*/
struct hlist_head *cfq_hash;
/*
* global crq hash for all queues
*/
struct hlist_head *crq_hash;
int rq_in_driver;
/*
* schedule slice state info
*/
/*
* idle window management
*/
struct timer_list idle_slice_timer;
struct work_struct unplug_work;
struct cfq_queue *active_queue;
struct cfq_io_context *active_cic;
int cur_prio, cur_end_prio;
unsigned int dispatch_slice;
struct timer_list idle_class_timer;
sector_t last_sector;
unsigned long last_end_request;
unsigned int rq_starved;
/*
* tunables, see top of file
*/
unsigned int cfq_quantum;
unsigned int cfq_queued;
unsigned int cfq_fifo_expire[2];
unsigned int cfq_back_penalty;
unsigned int cfq_back_max;
unsigned int cfq_slice[2];
unsigned int cfq_slice_async_rq;
unsigned int cfq_slice_idle;
unsigned int cfq_max_depth;
struct list_head cic_list;
};
/*
* Per process-grouping structure
*/
struct cfq_queue {
/* reference count */
atomic_t ref;
/* parent cfq_data */
struct cfq_data *cfqd;
/* cfqq lookup hash */
struct hlist_node cfq_hash;
/* hash key */
unsigned int key;
/* on either rr or empty list of cfqd */
struct list_head cfq_list;
/* sorted list of pending requests */
struct rb_root sort_list;
/* if fifo isn't expired, next request to serve */
struct cfq_rq *next_crq;
/* requests queued in sort_list */
int queued[2];
/* currently allocated requests */
int allocated[2];
/* fifo list of requests in sort_list */
struct list_head fifo;
unsigned long slice_start;
unsigned long slice_end;
unsigned long slice_left;
unsigned long service_last;
/* number of requests that are on the dispatch list */
int on_dispatch[2];
/* io prio of this group */
unsigned short ioprio, org_ioprio;
unsigned short ioprio_class, org_ioprio_class;
/* various state flags, see below */
unsigned int flags;
};
struct cfq_rq {
struct rb_node rb_node;
sector_t rb_key;
struct request *request;
struct hlist_node hash;
struct cfq_queue *cfq_queue;
struct cfq_io_context *io_context;
unsigned int crq_flags;
};
enum cfqq_state_flags {
CFQ_CFQQ_FLAG_on_rr = 0,
CFQ_CFQQ_FLAG_wait_request,
CFQ_CFQQ_FLAG_must_alloc,
CFQ_CFQQ_FLAG_must_alloc_slice,
CFQ_CFQQ_FLAG_must_dispatch,
CFQ_CFQQ_FLAG_fifo_expire,
CFQ_CFQQ_FLAG_idle_window,
CFQ_CFQQ_FLAG_prio_changed,
};
#define CFQ_CFQQ_FNS(name) \
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \
{ \
cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \
{ \
cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \
} \
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
{ \
return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \
}
CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
#undef CFQ_CFQQ_FNS
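/*
* Each CFQ_CFQQ_FNS(foo) expansion above generates three helpers that
* manipulate one bit of cfqq->flags:
*
* cfq_mark_cfqq_foo(cfqq) - set CFQ_CFQQ_FLAG_foo
* cfq_clear_cfqq_foo(cfqq) - clear it
* cfq_cfqq_foo(cfqq) - test it (returns 0 or 1)
*
* e.g. cfq_mark_cfqq_on_rr(cfqq) sets bit CFQ_CFQQ_FLAG_on_rr.
*/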
enum cfq_rq_state_flags {
CFQ_CRQ_FLAG_is_sync = 0,
};
#define CFQ_CRQ_FNS(name) \
static inline void cfq_mark_crq_##name(struct cfq_rq *crq) \
{ \
crq->crq_flags |= (1 << CFQ_CRQ_FLAG_##name); \
} \
static inline void cfq_clear_crq_##name(struct cfq_rq *crq) \
{ \
crq->crq_flags &= ~(1 << CFQ_CRQ_FLAG_##name); \
} \
static inline int cfq_crq_##name(const struct cfq_rq *crq) \
{ \
return (crq->crq_flags & (1 << CFQ_CRQ_FLAG_##name)) != 0; \
}
CFQ_CRQ_FNS(is_sync);
#undef CFQ_CRQ_FNS
static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
#define process_sync(tsk) ((tsk)->flags & PF_SYNCWRITE)
/*
* lots of deadline iosched dupes, can be abstracted later...
*/
static inline void cfq_del_crq_hash(struct cfq_rq *crq)
{
hlist_del_init(&crq->hash);
}
static inline void cfq_add_crq_hash(struct cfq_data *cfqd, struct cfq_rq *crq)
{
const int hash_idx = CFQ_MHASH_FN(rq_hash_key(crq->request));
hlist_add_head(&crq->hash, &cfqd->crq_hash[hash_idx]);
}
static struct request *cfq_find_rq_hash(struct cfq_data *cfqd, sector_t offset)
{
struct hlist_head *hash_list = &cfqd->crq_hash[CFQ_MHASH_FN(offset)];
struct hlist_node *entry, *next;
hlist_for_each_safe(entry, next, hash_list) {
struct cfq_rq *crq = list_entry_hash(entry);
struct request *__rq = crq->request;
if (!rq_mergeable(__rq)) {
cfq_del_crq_hash(crq);
continue;
}
if (rq_hash_key(__rq) == offset)
return __rq;
}
return NULL;
}
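/*
* Note that the hash lookup above also prunes lazily: any hashed request
* that is no longer mergeable is dropped from its bucket while walking
* it, which is why hlist_for_each_safe() is used.
*/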
/*
* scheduler run of queue, if there are requests pending and no one in the
* driver that will restart queueing
*/
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
if (cfqd->busy_queues)
kblockd_schedule_work(&cfqd->unplug_work);
}
static int cfq_queue_empty(request_queue_t *q)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
return !cfqd->busy_queues;
}
/*
* Lifted from AS - choose which of crq1 and crq2 that is best served now.
* We choose the request that is closest to the head right now. Distance
* behind the head are penalized and only allowed to a certain extent.
*/
static struct cfq_rq *
cfq_choose_req(struct cfq_data *cfqd, struct cfq_rq *crq1, struct cfq_rq *crq2)
{
sector_t last, s1, s2, d1 = 0, d2 = 0;
int r1_wrap = 0, r2_wrap = 0; /* requests are behind the disk head */
unsigned long back_max;
if (crq1 == NULL || crq1 == crq2)
return crq2;
if (crq2 == NULL)
return crq1;
if (cfq_crq_is_sync(crq1) && !cfq_crq_is_sync(crq2))
return crq1;
else if (cfq_crq_is_sync(crq2) && !cfq_crq_is_sync(crq1))
return crq2;
s1 = crq1->request->sector;
s2 = crq2->request->sector;
last = cfqd->last_sector;
/*
* by definition, 1KiB is 2 sectors
*/
back_max = cfqd->cfq_back_max * 2;
/*
* Strict one way elevator _except_ in the case where we allow
* short backward seeks which are biased as twice the cost of a
* similar forward seek.
*/
if (s1 >= last)
d1 = s1 - last;
else if (s1 + back_max >= last)
d1 = (last - s1) * cfqd->cfq_back_penalty;
else
r1_wrap = 1;
if (s2 >= last)
d2 = s2 - last;
else if (s2 + back_max >= last)
d2 = (last - s2) * cfqd->cfq_back_penalty;
else
r2_wrap = 1;
/* Found required data */
if (!r1_wrap && r2_wrap)
return crq1;
else if (!r2_wrap && r1_wrap)
return crq2;
else if (r1_wrap && r2_wrap) {
/* both behind the head */
if (s1 <= s2)
return crq1;
else
return crq2;
}
/* Both requests in front of the head */
if (d1 < d2)
return crq1;
else if (d2 < d1)
return crq2;
else {
if (s1 >= s2)
return crq1;
else
return crq2;
}
}
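/*
* Worked example of the distance metric above: with the head at
* last = 100000 and the default cfq_back_max = 16384 KiB
* (back_max = 32768 sectors), a request at s1 = 100100 gets d1 = 100,
* while one at s2 = 99900 (100 sectors behind, within back_max) gets
* d2 = 100 * cfq_back_penalty = 200, so the forward request is chosen
* even though both are equally far from the head.
*/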
/*
* would be nice to take fifo expire time into account as well
*/
static struct cfq_rq *
cfq_find_next_crq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
struct cfq_rq *last)
{
struct cfq_rq *crq_next = NULL, *crq_prev = NULL;
struct rb_node *rbnext, *rbprev;
if (!(rbnext = rb_next(&last->rb_node))) {
rbnext = rb_first(&cfqq->sort_list);
if (rbnext == &last->rb_node)
rbnext = NULL;
}
rbprev = rb_prev(&last->rb_node);
if (rbprev)
crq_prev = rb_entry_crq(rbprev);
if (rbnext)
crq_next = rb_entry_crq(rbnext);
return cfq_choose_req(cfqd, crq_next, crq_prev);
}
static void cfq_update_next_crq(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
if (cfqq->next_crq == crq)
cfqq->next_crq = cfq_find_next_crq(cfqq->cfqd, cfqq, crq);
}
static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
struct cfq_data *cfqd = cfqq->cfqd;
struct list_head *list, *entry;
list_del(&cfqq->cfq_list);
if (cfq_class_rt(cfqq))
list = &cfqd->cur_rr;
else if (cfq_class_idle(cfqq))
list = &cfqd->idle_rr;
else {
/*
* if cfqq has requests in flight, don't allow it to be
* found in cfq_set_active_queue before it has finished them.
* this is done to increase fairness between a process that
* has lots of io pending vs one that only generates one
* sporadically or synchronously
*/
if (cfq_cfqq_dispatched(cfqq))
list = &cfqd->busy_rr;
else
list = &cfqd->rr_list[cfqq->ioprio];
}
/*
* if queue was preempted, just add to front to be fair. busy_rr
* isn't sorted.
*/
if (preempted || list == &cfqd->busy_rr) {
list_add(&cfqq->cfq_list, list);
return;
}
/*
* sort by when queue was last serviced
*/
entry = list;
while ((entry = entry->prev) != list) {
struct cfq_queue *__cfqq = list_entry_cfqq(entry);
if (!__cfqq->service_last)
break;
if (time_before(__cfqq->service_last, cfqq->service_last))
break;
}
list_add(&cfqq->cfq_list, entry);
}
/*
* add to busy list of queues for service, trying to be fair in ordering
* the pending list according to last request service
*/
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
BUG_ON(cfq_cfqq_on_rr(cfqq));
cfq_mark_cfqq_on_rr(cfqq);
cfqd->busy_queues++;
cfq_resort_rr_list(cfqq, 0);
}
static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
BUG_ON(!cfq_cfqq_on_rr(cfqq));
cfq_clear_cfqq_on_rr(cfqq);
list_move(&cfqq->cfq_list, &cfqd->empty_list);
BUG_ON(!cfqd->busy_queues);
cfqd->busy_queues--;
}
/*
* rb tree support functions
*/
static inline void cfq_del_crq_rb(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
struct cfq_data *cfqd = cfqq->cfqd;
const int sync = cfq_crq_is_sync(crq);
BUG_ON(!cfqq->queued[sync]);
cfqq->queued[sync]--;
cfq_update_next_crq(crq);
rb_erase(&crq->rb_node, &cfqq->sort_list);
RB_CLEAR_COLOR(&crq->rb_node);
if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY(&cfqq->sort_list))
cfq_del_cfqq_rr(cfqd, cfqq);
}
static struct cfq_rq *
__cfq_add_crq_rb(struct cfq_rq *crq)
{
struct rb_node **p = &crq->cfq_queue->sort_list.rb_node;
struct rb_node *parent = NULL;
struct cfq_rq *__crq;
while (*p) {
parent = *p;
__crq = rb_entry_crq(parent);
if (crq->rb_key < __crq->rb_key)
p = &(*p)->rb_left;
else if (crq->rb_key > __crq->rb_key)
p = &(*p)->rb_right;
else
return __crq;
}
rb_link_node(&crq->rb_node, parent, p);
return NULL;
}
static void cfq_add_crq_rb(struct cfq_rq *crq)
{
struct cfq_queue *cfqq = crq->cfq_queue;
struct cfq_data *cfqd = cfqq->cfqd;
struct request *rq = crq->request;
struct cfq_rq *__alias;
crq->rb_key = rq_rb_key(rq);
/*
* looks a little odd, but the first insert might return an alias.
* if that happens, put the alias on the dispatch list
*/
while ((__alias = __cfq_add_crq_rb(crq)) != NULL)
cfq_dispatch_insert(cfqd->queue, __alias);
rb_insert_color(&crq->rb_node, &cfqq->sort_list);
cfq_add_cfqq_rr(cfqd, cfqq);
/*
* check if this request is a better next-serve candidate
*/
cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);
}
static inline void
cfq_reposition_crq_rb(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
rb_erase(&crq->rb_node, &cfqq->sort_list);
cfqq->queued[cfq_crq_is_sync(crq)]--;
cfq_add_crq_rb(crq);
}
static struct request *cfq_find_rq_rb(struct cfq_data *cfqd, sector_t sector)
{
struct cfq_queue *cfqq = cfq_find_cfq_hash(cfqd, current->pid, CFQ_KEY_ANY);
struct rb_node *n;
if (!cfqq)
goto out;
n = cfqq->sort_list.rb_node;
while (n) {
struct cfq_rq *crq = rb_entry_crq(n);
if (sector < crq->rb_key)
n = n->rb_left;
else if (sector > crq->rb_key)
n = n->rb_right;
else
return crq->request;
}
out:
return NULL;
}
static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
cfqd->rq_in_driver++;
}
static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
WARN_ON(!cfqd->rq_in_driver);
cfqd->rq_in_driver--;
}
static void cfq_remove_request(struct request *rq)
{
struct cfq_rq *crq = RQ_DATA(rq);
list_del_init(&rq->queuelist);
cfq_del_crq_rb(crq);
cfq_del_crq_hash(crq);
}
static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct request *__rq;
int ret;
__rq = cfq_find_rq_hash(cfqd, bio->bi_sector);
if (__rq && elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_BACK_MERGE;
goto out;
}
__rq = cfq_find_rq_rb(cfqd, bio->bi_sector + bio_sectors(bio));
if (__rq && elv_rq_merge_ok(__rq, bio)) {
ret = ELEVATOR_FRONT_MERGE;
goto out;
}
return ELEVATOR_NO_MERGE;
out:
*req = __rq;
return ret;
}
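/*
* Merge lookup strategy: a back merge (bio appended to an existing
* request) is found via the crq hash, keyed on request end sector, by
* looking up bio->bi_sector; a front merge is found in the current
* process' rb-tree, keyed on start sector, by looking up
* bio->bi_sector + bio_sectors(bio).
*/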
static void cfq_merged_request(request_queue_t *q, struct request *req)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_rq *crq = RQ_DATA(req);
cfq_del_crq_hash(crq);
cfq_add_crq_hash(cfqd, crq);
if (rq_rb_key(req) != crq->rb_key) {
struct cfq_queue *cfqq = crq->cfq_queue;
cfq_update_next_crq(crq);
cfq_reposition_crq_rb(cfqq, crq);
}
}
static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
struct request *next)
{
cfq_merged_request(q, rq);
/*
* reposition in fifo if next is older than rq
*/
if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
time_before(next->start_time, rq->start_time))
list_move(&rq->queuelist, &next->queuelist);
cfq_remove_request(next);
}
static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
if (cfqq) {
/*
* stop potential idle class queues waiting service
*/
del_timer(&cfqd->idle_class_timer);
cfqq->slice_start = jiffies;
cfqq->slice_end = 0;
cfqq->slice_left = 0;
cfq_clear_cfqq_must_alloc_slice(cfqq);
cfq_clear_cfqq_fifo_expire(cfqq);
}
cfqd->active_queue = cfqq;
}
/*
* current cfqq expired its slice (or was too idle), select new one
*/
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
int preempted)
{
unsigned long now = jiffies;
if (cfq_cfqq_wait_request(cfqq))
del_timer(&cfqd->idle_slice_timer);
if (!preempted && !cfq_cfqq_dispatched(cfqq)) {
cfqq->service_last = now;
cfq_schedule_dispatch(cfqd);
}
cfq_clear_cfqq_must_dispatch(cfqq);
cfq_clear_cfqq_wait_request(cfqq);
/*
* store what was left of this slice, if the queue idled out
* or was preempted
*/
if (time_after(cfqq->slice_end, now))
cfqq->slice_left = cfqq->slice_end - now;
else
cfqq->slice_left = 0;
if (cfq_cfqq_on_rr(cfqq))
cfq_resort_rr_list(cfqq, preempted);
if (cfqq == cfqd->active_queue)
cfqd->active_queue = NULL;
if (cfqd->active_cic) {
put_io_context(cfqd->active_cic->ioc);
cfqd->active_cic = NULL;
}
cfqd->dispatch_slice = 0;
}
static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
struct cfq_queue *cfqq = cfqd->active_queue;
if (cfqq)
__cfq_slice_expired(cfqd, cfqq, preempted);
}
/*
* 0
* 0,1
* 0,1,2
* 0,1,2,3
* 0,1,2,3,4
* 0,1,2,3,4,5
* 0,1,2,3,4,5,6
* 0,1,2,3,4,5,6,7
*/
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
int prio, wrap;
prio = -1;
wrap = 0;
do {
int p;
for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
if (!list_empty(&cfqd->rr_list[p])) {
prio = p;
break;
}
}
if (prio != -1)
break;
cfqd->cur_prio = 0;
if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
cfqd->cur_end_prio = 0;
if (wrap)
break;
wrap = 1;
}
} while (1);
if (unlikely(prio == -1))
return -1;
BUG_ON(prio >= CFQ_PRIO_LISTS);
list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);
cfqd->cur_prio = prio + 1;
if (cfqd->cur_prio > cfqd->cur_end_prio) {
cfqd->cur_end_prio = cfqd->cur_prio;
cfqd->cur_prio = 0;
}
if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
cfqd->cur_prio = 0;
cfqd->cur_end_prio = 0;
}
return prio;
}
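/*
* In other words, the served priority window grows by one level per
* pass, as sketched in the comment above: first only prio 0 is
* eligible, then 0-1, then 0-2, and so on up to 0-7 before the window
* resets. Prio 0 therefore takes part in every pass while prio 7 only
* joins the last one, which is roughly how the numerically lower
* (higher-priority) classes earn their larger share of dispatch rounds.
*/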
static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq = NULL;
/*
* if current list is non-empty, grab first entry. if it is empty,
* get next prio level and grab first entry then if any are spliced
*/
if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1)
cfqq = list_entry_cfqq(cfqd->cur_rr.next);
/*
* if we have idle queues and no rt or be queues had pending
* requests, either allow immediate service if the grace period
* has passed or arm the idle grace timer
*/
if (!cfqq && !list_empty(&cfqd->idle_rr)) {
unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
if (time_after_eq(jiffies, end))
cfqq = list_entry_cfqq(cfqd->idle_rr.next);
else
mod_timer(&cfqd->idle_class_timer, end);
}
__cfq_set_active_queue(cfqd, cfqq);
return cfqq;
}
static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned long sl;
WARN_ON(!RB_EMPTY(&cfqq->sort_list));
WARN_ON(cfqq != cfqd->active_queue);
/*
* idle is disabled, either manually or by past process history
*/
if (!cfqd->cfq_slice_idle)
return 0;
if (!cfq_cfqq_idle_window(cfqq))
return 0;
/*
* task has exited, don't wait
*/
if (cfqd->active_cic && !cfqd->active_cic->ioc->task)
return 0;
cfq_mark_cfqq_must_dispatch(cfqq);
cfq_mark_cfqq_wait_request(cfqq);
sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);
mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
return 1;
}
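/*
* The timer armed above implements the idle window: instead of
* switching queues the moment a sync queue runs dry, cfq waits up to
* cfq_slice_idle jiffies (bounded by the remaining slice) for the task
* to issue its next request, trading a short stall for better locality
* on dependent synchronous reads.
*/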
static void cfq_dispatch_insert(request_queue_t *q, struct cfq_rq *crq)
{
struct cfq_data *cfqd = q->elevator->elevator_data;
struct cfq_queue *cfqq = crq->cfq_queue;
cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);
cfq_remove_request(crq->request);
cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
elv_dispatch_sort(q, crq->request);
}
/*
* return expired entry, or NULL to just start from scratch in rbtree
*/
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
struct cfq_data *cfqd = cfqq->cfqd;
struct cfq_rq *crq;
struct request *rq;
if (!list_empty(&cfqq->fifo)) {
int fifo = cfq_cfqq_class_sync(cfqq);
crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
rq = crq->request;
if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
cfq_mark_cfqq_fifo_expire(cfqq);
return crq;
}
}
return NULL;
}
/*
* Scale schedule slice based on io priority. Use the sync time slice only
* if a queue is marked sync and has sync io queued. A sync queue with async
* io only, should not get full sync slice length.
*/
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];
WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}
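/*
* Example, assuming HZ = 1000 and the default cfq_slice_sync = HZ / 10:
* a sync queue at the default priority 4 gets base_slice +
* base_slice / 5 * (4 - 4) = 100ms, priority 0 gets 180ms and
* priority 7 gets 40ms.
*/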
static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}
static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
const int base_rq = cfqd->cfq_slice_async_rq;
WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);
return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
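/*
* Example with the default cfq_slice_async_rq = 2 and
* CFQ_PRIO_LISTS = 8: priority 7 yields 2 * (2 + 2 * 0) = 4 requests,
* priority 0 yields 2 * (2 + 2 * 7) = 32.
*/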
/*
* get next queue for service
*/
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
struct cfq_queue *cfqq;
unsigned long now = jiffies;
cfqq = cfqd->active_queue;
if (!cfqq)
goto new_queue;
/*
* slice has expired
*/
if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
goto expire;
/*
* if queue has requests, dispatch one. if not, check if
* enough slice is left to wait for one