/*
 *  CFQ, or complete fairness queueing, disk scheduler.
 *
 *  Based on ideas from a previously unfinished io
 *  scheduler (round robin per-process disk scheduling) and Andrea Arcangeli.
 *
 *  Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 */
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/hash.h>
#include <linux/rbtree.h>
#include <linux/ioprio.h>

/*
 * tunables
 */
static const int cfq_quantum = 4;		/* max queue in one round of service */
static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
static const int cfq_back_max = 16 * 1024;	/* maximum backwards seek, in KiB */
static const int cfq_back_penalty = 2;		/* penalty of a backwards seek */

static const int cfq_slice_sync = HZ / 10;
static int cfq_slice_async = HZ / 25;
static const int cfq_slice_async_rq = 2;
static int cfq_slice_idle = HZ / 125;
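
/*
 * Assuming HZ == 1000, these defaults work out to 100ms sync and 40ms
 * async slices, an 8ms idle window, and fifo expiry of 250ms for async
 * vs 125ms for sync requests (cfq_fifo_expire[0] and [1] respectively).
 */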

#define CFQ_IDLE_GRACE		(HZ / 10)
#define CFQ_SLICE_SCALE		(5)

#define CFQ_KEY_ASYNC		(0)

/*
 * for the hash of cfqq inside the cfqd
 */
#define CFQ_QHASH_SHIFT		6
#define CFQ_QHASH_ENTRIES	(1 << CFQ_QHASH_SHIFT)
#define list_entry_qhash(entry)	hlist_entry((entry), struct cfq_queue, cfq_hash)

#define list_entry_cfqq(ptr)	list_entry((ptr), struct cfq_queue, cfq_list)

#define RQ_CIC(rq)		((struct cfq_io_context*)(rq)->elevator_private)
#define RQ_CFQQ(rq)		((rq)->elevator_private2)

static struct kmem_cache *cfq_pool;
static struct kmem_cache *cfq_ioc_pool;

static DEFINE_PER_CPU(unsigned long, ioc_count);
static struct completion *ioc_gone;

#define CFQ_PRIO_LISTS		IOPRIO_BE_NR
#define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
#define cfq_class_rt(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_RT)

#define ASYNC			(0)
#define SYNC			(1)

#define cfq_cfqq_dispatched(cfqq)	\
	((cfqq)->on_dispatch[ASYNC] + (cfqq)->on_dispatch[SYNC])

#define cfq_cfqq_class_sync(cfqq)	((cfqq)->key != CFQ_KEY_ASYNC)

#define cfq_cfqq_sync(cfqq)		\
	(cfq_cfqq_class_sync(cfqq) || (cfqq)->on_dispatch[SYNC])

#define sample_valid(samples)	((samples) > 80)
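
/*
 * Seek statistics (see CIC_SEEKY below) are only acted upon once more
 * than 80 samples have been collected.
 */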

/*
 * Per block device queue structure
 */
struct cfq_data {
	request_queue_t *queue;

	/*
	 * rr list of queues with requests and the count of them
	 */
	struct list_head rr_list[CFQ_PRIO_LISTS];
	struct list_head busy_rr;
	struct list_head cur_rr;
	struct list_head idle_rr;
	unsigned int busy_queues;

	/*
	 * cfqq lookup hash
	 */
	struct hlist_head *cfq_hash;

	int rq_in_driver;
	int hw_tag;

	/*
	 * idle window management
	 */
	struct timer_list idle_slice_timer;
	struct work_struct unplug_work;

	struct cfq_queue *active_queue;
	struct cfq_io_context *active_cic;
	int cur_prio, cur_end_prio;
	unsigned int dispatch_slice;

	struct timer_list idle_class_timer;

	sector_t last_sector;
	unsigned long last_end_request;

	/*
	 * tunables, see top of file
	 */
	unsigned int cfq_quantum;
	unsigned int cfq_fifo_expire[2];
	unsigned int cfq_back_penalty;
	unsigned int cfq_back_max;
	unsigned int cfq_slice[2];
	unsigned int cfq_slice_async_rq;
	unsigned int cfq_slice_idle;
};

/*
 * Per process-grouping structure
 */
struct cfq_queue {
	/* reference count */
	atomic_t ref;
	/* parent cfq_data */
	struct cfq_data *cfqd;
	/* cfqq lookup hash */
	struct hlist_node cfq_hash;
	/* hash key */
	unsigned int key;
	/* member of the rr/busy/cur/idle cfqd list */
	struct list_head cfq_list;
	/* sorted list of pending requests */
	struct rb_root sort_list;
	/* if fifo isn't expired, next request to serve */
	struct request *next_rq;
	/* requests queued in sort_list */
	int queued[2];
	/* currently allocated requests */
	int allocated[2];
	/* pending metadata requests */
	int meta_pending;
	/* fifo list of requests in sort_list */
	struct list_head fifo;

	unsigned long slice_end;
	unsigned long slice_left;
	unsigned long service_last;

	/* number of requests that are on the dispatch list */
	int on_dispatch[2];

	/* io prio of this group */
	unsigned short ioprio, org_ioprio;
	unsigned short ioprio_class, org_ioprio_class;

	/* various state flags, see below */
	unsigned int flags;
};

enum cfqq_state_flags {
	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
};

#define CFQ_CFQQ_FNS(name)						\
static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq)		\
{									\
	cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq)	\
{									\
	cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name);			\
}									\
static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq)		\
{									\
	return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0;	\
}

CFQ_CFQQ_FNS(on_rr);
CFQ_CFQQ_FNS(wait_request);
CFQ_CFQQ_FNS(must_alloc);
CFQ_CFQQ_FNS(must_alloc_slice);
CFQ_CFQQ_FNS(must_dispatch);
CFQ_CFQQ_FNS(fifo_expire);
CFQ_CFQQ_FNS(idle_window);
CFQ_CFQQ_FNS(prio_changed);
CFQ_CFQQ_FNS(queue_new);
CFQ_CFQQ_FNS(slice_new);
#undef CFQ_CFQQ_FNS
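
/*
 * Each CFQ_CFQQ_FNS() invocation above expands to three helpers; e.g.
 * CFQ_CFQQ_FNS(on_rr) yields cfq_mark_cfqq_on_rr(), cfq_clear_cfqq_on_rr()
 * and the cfq_cfqq_on_rr() test used throughout this file.
 */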

static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
static void cfq_dispatch_insert(request_queue_t *, struct request *);
static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);

/*
 * scheduler run of queue, if there are requests pending and no one in the
 * driver that will restart queueing
 */
static inline void cfq_schedule_dispatch(struct cfq_data *cfqd)
{
	if (cfqd->busy_queues)
		kblockd_schedule_work(&cfqd->unplug_work);
}

static int cfq_queue_empty(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	return !cfqd->busy_queues;
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw, int is_sync)
{
	/*
	 * Use the per-process queue, for read requests and synchronous writes
	 */
	if (!(rw & REQ_RW) || is_sync)
		return task->pid;

	return CFQ_KEY_ASYNC;
}
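
/*
 * For example, two tasks doing buffered writes share the one async queue
 * keyed by CFQ_KEY_ASYNC, while each task's reads and synchronous writes
 * map to its own per-pid queue.
 */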

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only, should not get full sync slice length.
 */
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}
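
/*
 * Worked example, assuming HZ == 1000: the sync base slice is 100ms and
 * CFQ_SLICE_SCALE is 5, so the default prio 4 keeps 100ms, prio 0 gets
 * 100 + 20 * 4 = 180ms, and prio 7 gets 100 + 20 * (4 - 7) = 40ms.
 */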

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

/*
 * We need to wrap this check in cfq_cfqq_slice_new(), since ->slice_end
 * isn't valid until the first request from the dispatch is activated
 * and the slice time set.
 */
static inline int cfq_slice_used(struct cfq_queue *cfqq)
{
	if (cfq_cfqq_slice_new(cfqq))
		return 0;
	if (time_before(jiffies, cfqq->slice_end))
		return 0;

	return 1;
}

/*
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
 * We choose the request that is closest to the head right now. Distance
 * behind the head is penalized and only allowed to a certain extent.
 */
static struct request *
cfq_choose_req(struct cfq_data *cfqd, struct request *rq1, struct request *rq2)
{
	sector_t last, s1, s2, d1 = 0, d2 = 0;
	unsigned long back_max;
#define CFQ_RQ1_WRAP	0x01 /* request 1 wraps */
#define CFQ_RQ2_WRAP	0x02 /* request 2 wraps */
	unsigned wrap = 0; /* bit mask: requests behind the disk head? */

	if (rq1 == NULL || rq1 == rq2)
		return rq2;
	if (rq2 == NULL)
		return rq1;
	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
		return rq2;
	if (rq_is_meta(rq1) && !rq_is_meta(rq2))
		return rq1;
	else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
		return rq2;

	s1 = rq1->sector;
	s2 = rq2->sector;

	last = cfqd->last_sector;

	/*
	 * by definition, 1KiB is 2 sectors
	 */
	back_max = cfqd->cfq_back_max * 2;

	/*
	 * Strict one way elevator _except_ in the case where we allow
	 * short backward seeks which are biased as twice the cost of a
	 * similar forward seek.
	 */
	if (s1 >= last)
		d1 = s1 - last;
	else if (s1 + back_max >= last)
		d1 = (last - s1) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ1_WRAP;

	if (s2 >= last)
		d2 = s2 - last;
	else if (s2 + back_max >= last)
		d2 = (last - s2) * cfqd->cfq_back_penalty;
	else
		wrap |= CFQ_RQ2_WRAP;

	/* Found required data */

	/*
	 * By doing switch() on the bit mask "wrap" we avoid having to
	 * check two variables for all permutations: --> faster!
	 */
	switch (wrap) {
	case 0: /* common case for CFQ: rq1 and rq2 not wrapped */
		if (d1 < d2)
			return rq1;
		else if (d2 < d1)
			return rq2;
		else {
			if (s1 >= s2)
				return rq1;
			else
				return rq2;
		}

	case CFQ_RQ2_WRAP:
		return rq1;
	case CFQ_RQ1_WRAP:
		return rq2;
	case (CFQ_RQ1_WRAP|CFQ_RQ2_WRAP): /* both rqs wrapped */
	default:
		/*
		 * Since both rqs are wrapped,
		 * start with the one that's further behind head
		 * (--> only *one* back seek required),
		 * since back seek takes more time than forward.
		 */
		if (s1 <= s2)
			return rq1;
		else
			return rq2;
	}
}
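
/*
 * Illustrative numbers for the above: with the head at sector 1000,
 * back_max covering 32 sectors and cfq_back_penalty == 2, a request at
 * sector 1100 has d = 100 while one at sector 990 has (1000 - 990) * 2
 * = 20 and wins; one at sector 900 lies beyond back_max and wraps.
 */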

/*
 * would be nice to take fifo expire time into account as well
 */
static struct request *
cfq_find_next_rq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		  struct request *last)
{
	struct rb_node *rbnext = rb_next(&last->rb_node);
	struct rb_node *rbprev = rb_prev(&last->rb_node);
	struct request *next = NULL, *prev = NULL;

	BUG_ON(RB_EMPTY_NODE(&last->rb_node));

	if (rbprev)
		prev = rb_entry_rq(rbprev);

	if (rbnext)
		next = rb_entry_rq(rbnext);
	else {
		rbnext = rb_first(&cfqq->sort_list);
		if (rbnext && rbnext != &last->rb_node)
			next = rb_entry_rq(rbnext);
	}

	return cfq_choose_req(cfqd, next, prev);
}

static void cfq_resort_rr_list(struct cfq_queue *cfqq, int preempted)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct list_head *list, *n;
	struct cfq_queue *__cfqq;

	/*
	 * Resorting requires the cfqq to be on the RR list already.
	 */
	if (!cfq_cfqq_on_rr(cfqq))
		return;

	list_del(&cfqq->cfq_list);

	if (cfq_class_rt(cfqq))
		list = &cfqd->cur_rr;
	else if (cfq_class_idle(cfqq))
		list = &cfqd->idle_rr;
	else {
		/*
		 * if cfqq has requests in flight, don't allow it to be
		 * found in cfq_set_active_queue before it has finished them.
		 * this is done to increase fairness between a process that
		 * has lots of io pending vs one that only generates one
		 * sporadically or synchronously
		 */
		if (cfq_cfqq_dispatched(cfqq))
			list = &cfqd->busy_rr;
		else
			list = &cfqd->rr_list[cfqq->ioprio];
	}

	if (preempted || cfq_cfqq_queue_new(cfqq)) {
		/*
		 * If this queue was preempted or is new (never been serviced),
		 * let it be added first for fairness but behind other new
		 * queues.
		 */
		n = list;
		while (n->next != list) {
			__cfqq = list_entry_cfqq(n->next);
			if (!cfq_cfqq_queue_new(__cfqq))
				break;

			n = n->next;
		}

		list_add_tail(&cfqq->cfq_list, n);
	} else if (!cfq_cfqq_class_sync(cfqq)) {
		/*
		 * async queue always goes to the end. this won't be overly
		 * unfair to writes, as the sort of the sync queue won't be
		 * allowed to pass the async queue again.
		 */
		list_add_tail(&cfqq->cfq_list, list);
	} else {
		/*
		 * sort by last service, but don't cross a new or async
		 * queue. we don't cross a new queue because it hasn't been
		 * serviced before, and we don't cross an async queue because
		 * it gets added to the end on expire.
		 */
		n = list;
		while ((n = n->prev) != list) {
			struct cfq_queue *__cfqq = list_entry_cfqq(n);

			if (!cfq_cfqq_class_sync(cfqq) || !__cfqq->service_last)
				break;
			if (time_before(__cfqq->service_last, cfqq->service_last))
				break;
		}
		list_add(&cfqq->cfq_list, n);
	}
}

/*
 * add to busy list of queues for service, trying to be fair in ordering
 * the pending list according to last request service
 */
static inline void
cfq_add_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(cfq_cfqq_on_rr(cfqq));
	cfq_mark_cfqq_on_rr(cfqq);
	cfqd->busy_queues++;

	cfq_resort_rr_list(cfqq, 0);
}

static inline void
cfq_del_cfqq_rr(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	BUG_ON(!cfq_cfqq_on_rr(cfqq));
	cfq_clear_cfqq_on_rr(cfqq);
	list_del_init(&cfqq->cfq_list);

	BUG_ON(!cfqd->busy_queues);
	cfqd->busy_queues--;
}

/*
 * rb tree support functions
 */
static inline void cfq_del_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	const int sync = rq_is_sync(rq);

	BUG_ON(!cfqq->queued[sync]);
	cfqq->queued[sync]--;

	elv_rb_del(&cfqq->sort_list, rq);

	if (cfq_cfqq_on_rr(cfqq) && RB_EMPTY_ROOT(&cfqq->sort_list))
		cfq_del_cfqq_rr(cfqd, cfqq);
}

static void cfq_add_rq_rb(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *__alias;

	cfqq->queued[rq_is_sync(rq)]++;

	/*
	 * looks a little odd, but the first insert might return an alias.
	 * if that happens, put the alias on the dispatch list
	 */
	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
		cfq_dispatch_insert(cfqd->queue, __alias);

	if (!cfq_cfqq_on_rr(cfqq))
		cfq_add_cfqq_rr(cfqd, cfqq);
}

static inline void
cfq_reposition_rq_rb(struct cfq_queue *cfqq, struct request *rq)
{
	elv_rb_del(&cfqq->sort_list, rq);
	cfqq->queued[rq_is_sync(rq)]--;
	cfq_add_rq_rb(rq);
}

static struct request *
cfq_find_rq_fmerge(struct cfq_data *cfqd, struct bio *bio)
{
	struct task_struct *tsk = current;
	pid_t key = cfq_queue_pid(tsk, bio_data_dir(bio), bio_sync(bio));
	struct cfq_queue *cfqq;

	cfqq = cfq_find_cfq_hash(cfqd, key, tsk->ioprio);
	if (cfqq) {
		sector_t sector = bio->bi_sector + bio_sectors(bio);

		return elv_rb_find(&cfqq->sort_list, sector);
	}

	return NULL;
}

static void cfq_activate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	cfqd->rq_in_driver++;

	/*
	 * If the depth is larger than 1, it really could be queueing. But let's
	 * make the mark a little higher - idling could still be good for
	 * low queueing, and a low queueing number could also just indicate
	 * a SCSI mid layer like behaviour where limit+1 is often seen.
	 */
	if (!cfqd->hw_tag && cfqd->rq_in_driver > 4)
		cfqd->hw_tag = 1;
}

static void cfq_deactivate_request(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;
}

static void cfq_remove_request(struct request *rq)
{
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	if (cfqq->next_rq == rq)
		cfqq->next_rq = cfq_find_next_rq(cfqq->cfqd, cfqq, rq);

	list_del_init(&rq->queuelist);
	cfq_del_rq_rb(rq);

	if (rq_is_meta(rq)) {
		WARN_ON(!cfqq->meta_pending);
		cfqq->meta_pending--;
	}
}

static int
cfq_merge(request_queue_t *q, struct request **req, struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *__rq;

	__rq = cfq_find_rq_fmerge(cfqd, bio);
	if (__rq && elv_rq_merge_ok(__rq, bio)) {
		*req = __rq;
		return ELEVATOR_FRONT_MERGE;
	}

	return ELEVATOR_NO_MERGE;
}

static void cfq_merged_request(request_queue_t *q, struct request *req,
			       int type)
{
	if (type == ELEVATOR_FRONT_MERGE) {
		struct cfq_queue *cfqq = RQ_CFQQ(req);

		cfq_reposition_rq_rb(cfqq, req);
	}
}

static void
cfq_merged_requests(request_queue_t *q, struct request *rq,
		    struct request *next)
{
	/*
	 * reposition in fifo if next is older than rq
	 */
	if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
	    time_before(next->start_time, rq->start_time))
		list_move(&rq->queuelist, &next->queuelist);

	cfq_remove_request(next);
}

static int cfq_allow_merge(request_queue_t *q, struct request *rq,
			   struct bio *bio)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	const int rw = bio_data_dir(bio);
	struct cfq_queue *cfqq;
	pid_t key;

	/*
	 * Disallow merge of a sync bio into an async request.
	 */
	if ((bio_data_dir(bio) == READ || bio_sync(bio)) && !rq_is_sync(rq))
		return 0;

	/*
	 * Lookup the cfqq that this bio will be queued with. Allow
	 * merge only if rq is queued there.
	 */
	key = cfq_queue_pid(current, rw, bio_sync(bio));
	cfqq = cfq_find_cfq_hash(cfqd, key, current->ioprio);

	if (cfqq == RQ_CFQQ(rq))
		return 1;

	return 0;
}

static inline void
__cfq_set_active_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	if (cfqq) {
		/*
		 * stop potential idle class queues waiting service
		 */
		del_timer(&cfqd->idle_class_timer);

		cfqq->slice_end = 0;
		cfqq->slice_left = 0;
		cfq_clear_cfqq_must_alloc_slice(cfqq);
		cfq_clear_cfqq_fifo_expire(cfqq);
		cfq_mark_cfqq_slice_new(cfqq);
	}

	cfqd->active_queue = cfqq;
}

/*
 * current cfqq expired its slice (or was too idle), select new one
 */
static void
__cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		    int preempted)
{
	unsigned long now = jiffies;

	if (cfq_cfqq_wait_request(cfqq))
		del_timer(&cfqd->idle_slice_timer);

	if (!preempted && !cfq_cfqq_dispatched(cfqq))
		cfq_schedule_dispatch(cfqd);

	cfq_clear_cfqq_must_dispatch(cfqq);
	cfq_clear_cfqq_wait_request(cfqq);
	cfq_clear_cfqq_queue_new(cfqq);

	/*
	 * store what was left of this slice, if the queue idled out
	 * or was preempted
	 */
	if (time_after(cfqq->slice_end, now))
		cfqq->slice_left = cfqq->slice_end - now;
	else
		cfqq->slice_left = 0;

	cfq_resort_rr_list(cfqq, preempted);

	if (cfqq == cfqd->active_queue)
		cfqd->active_queue = NULL;

	if (cfqd->active_cic) {
		put_io_context(cfqd->active_cic->ioc);
		cfqd->active_cic = NULL;
	}

	cfqd->dispatch_slice = 0;
}

static inline void cfq_slice_expired(struct cfq_data *cfqd, int preempted)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfqq)
		__cfq_slice_expired(cfqd, cfqq, preempted);
}

/*
 * The range of priority levels scanned grows by one each round, so lower
 * (better) prio queues are serviced more often:
 * 0
 * 0,1
 * 0,1,2
 * 0,1,2,3
 * 0,1,2,3,4
 * 0,1,2,3,4,5
 * 0,1,2,3,4,5,6
 * 0,1,2,3,4,5,6,7
 */
static int cfq_get_next_prio_level(struct cfq_data *cfqd)
{
	int prio, wrap;

	prio = -1;
	wrap = 0;
	do {
		int p;

		for (p = cfqd->cur_prio; p <= cfqd->cur_end_prio; p++) {
			if (!list_empty(&cfqd->rr_list[p])) {
				prio = p;
				break;
			}
		}

		if (prio != -1)
			break;
		cfqd->cur_prio = 0;
		if (++cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
			cfqd->cur_end_prio = 0;
			if (wrap)
				break;
			wrap = 1;
		}
	} while (1);

	if (unlikely(prio == -1))
		return -1;

	BUG_ON(prio >= CFQ_PRIO_LISTS);

	list_splice_init(&cfqd->rr_list[prio], &cfqd->cur_rr);

	cfqd->cur_prio = prio + 1;
	if (cfqd->cur_prio > cfqd->cur_end_prio) {
		cfqd->cur_end_prio = cfqd->cur_prio;
		cfqd->cur_prio = 0;
	}
	if (cfqd->cur_end_prio == CFQ_PRIO_LISTS) {
		cfqd->cur_prio = 0;
		cfqd->cur_end_prio = 0;
	}

	return prio;
}

static struct cfq_queue *cfq_set_active_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq = NULL;

	if (!list_empty(&cfqd->cur_rr) || cfq_get_next_prio_level(cfqd) != -1) {
		/*
		 * if current list is non-empty, grab first entry. if it is
		 * empty, get next prio level and grab first entry then if any
		 * are spliced
		 */
		cfqq = list_entry_cfqq(cfqd->cur_rr.next);
	} else if (!list_empty(&cfqd->busy_rr)) {
		/*
		 * If no new queues are available, check if the busy list has
		 * some before falling back to idle io.
		 */
		cfqq = list_entry_cfqq(cfqd->busy_rr.next);
	} else if (!list_empty(&cfqd->idle_rr)) {
		/*
		 * if we have idle queues and no rt or be queues had pending
		 * requests, either allow immediate service if the grace period
		 * has passed or arm the idle grace timer
		 */
		unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;

		if (time_after_eq(jiffies, end))
			cfqq = list_entry_cfqq(cfqd->idle_rr.next);
		else
			mod_timer(&cfqd->idle_class_timer, end);
	}

	__cfq_set_active_queue(cfqd, cfqq);
	return cfqq;
}

#define CIC_SEEKY(cic)	((cic)->seek_mean > (128 * 1024))
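
/*
 * A mean seek distance above 128KiB marks the process as seeky; such
 * processes get only a token 2ms idle window in cfq_arm_slice_timer().
 */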

static int cfq_arm_slice_timer(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_io_context *cic;
	unsigned long sl;

	WARN_ON(!RB_EMPTY_ROOT(&cfqq->sort_list));
	WARN_ON(cfqq != cfqd->active_queue);

	/*
	 * idle is disabled, either manually or by past process history
	 */
	if (!cfqd->cfq_slice_idle)
		return 0;
	if (!cfq_cfqq_idle_window(cfqq))
		return 0;

	/*
	 * task has exited, don't wait
	 */
	cic = cfqd->active_cic;
	if (!cic || !cic->ioc->task)
		return 0;

	cfq_mark_cfqq_must_dispatch(cfqq);
	cfq_mark_cfqq_wait_request(cfqq);

	sl = min(cfqq->slice_end - 1, (unsigned long) cfqd->cfq_slice_idle);

	/*
	 * we don't want to idle for seeks, but we do want to allow
	 * fair distribution of slice time for a process doing back-to-back
	 * seeks. so allow a little bit of time for him to submit a new rq
	 */
	if (sample_valid(cic->seek_samples) && CIC_SEEKY(cic))
		sl = min(sl, msecs_to_jiffies(2));

	mod_timer(&cfqd->idle_slice_timer, jiffies + sl);
	return 1;
}

static void cfq_dispatch_insert(request_queue_t *q, struct request *rq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = RQ_CFQQ(rq);

	cfq_remove_request(rq);
	cfqq->on_dispatch[rq_is_sync(rq)]++;
	elv_dispatch_sort(q, rq);

	rq = list_entry(q->queue_head.prev, struct request, queuelist);
	cfqd->last_sector = rq->sector + rq->nr_sectors;
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct request *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	int fifo;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;
	if (list_empty(&cfqq->fifo))
		return NULL;

	fifo = cfq_cfqq_class_sync(cfqq);
	rq = rq_entry_fifo(cfqq->fifo.next);

	if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
		cfq_mark_cfqq_fifo_expire(cfqq);
		return rq;
	}

	return NULL;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
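
/*
 * Worked example with the defaults: cfq_slice_async_rq == 2 and
 * CFQ_PRIO_LISTS == 8, so a prio 4 queue may dispatch up to
 * 2 * (2 + 2 * 3) = 16 requests in one slice, a prio 0 queue 32.
 */
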
/*
 * get next queue for service
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
{
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (!cfq_cfqq_must_dispatch(cfqq) && cfq_slice_used(cfqq))
		goto expire;

	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
	if (!RB_EMPTY_ROOT(&cfqq->sort_list))
		goto keep_queue;
	else if (cfq_cfqq_slice_new(cfqq) || cfq_cfqq_dispatched(cfqq)) {
		cfqq = NULL;
		goto keep_queue;
	} else if (cfq_cfqq_class_sync(cfqq)) {
		if (cfq_arm_slice_timer(cfqd, cfqq))
			return NULL;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));

	do {
		struct request *rq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((rq = cfq_check_fifo(cfqq)) == NULL)
			rq = cfqq->next_rq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_insert(cfqd->queue, rq);

		cfqd->dispatch_slice++;
		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
			cfqd->active_cic = RQ_CIC(rq);
		}

		if (RB_EMPTY_ROOT(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * expire an async queue immediately if it has used up its slice. an
	 * idle queue always expires after 1 dispatch round.
	 */
	if ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq)) {
		cfqq->slice_end = jiffies + 1;
		cfq_slice_expired(cfqd, 0);
	}

	return dispatched;
}

static int
cfq_forced_dispatch_cfqqs(struct list_head *list)
{