static void cfq_dispatch_sort(request_queue_t *q, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct list_head *head = &q->queue_head, *entry = head;
	struct request *__rq;
	sector_t last;

	list_del(&crq->request->queuelist);

	last = cfqd->last_sector;
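	/*
	 * scan the dispatch list back to front for the first slot that keeps
	 * requests sector-sorted past the last dispatched sector; barrier and
	 * non-fs requests act as fences that must not be crossed
	 */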
	list_for_each_entry_reverse(__rq, head, queuelist) {
		struct cfq_rq *__crq = RQ_DATA(__rq);

		if (blk_barrier_rq(__rq))
			break;
		if (!blk_fs_request(__rq))
			break;
		if (cfq_crq_requeued(__crq))
			break;

		if (__rq->sector <= crq->request->sector)
			break;
		if (__rq->sector > last && crq->request->sector < last) {
			last = crq->request->sector + crq->request->nr_sectors;
			break;
		}
		entry = &__rq->queuelist;
	}

	cfqd->last_sector = last;

	cfqq->next_crq = cfq_find_next_crq(cfqd, cfqq, crq);

	cfq_del_crq_rb(crq);
	cfq_remove_merge_hints(q, crq);

	cfq_mark_crq_in_flight(crq);
	cfq_clear_crq_requeued(crq);

	cfqq->on_dispatch[cfq_crq_is_sync(crq)]++;
	list_add_tail(&crq->request->queuelist, entry);
}

/*
 * return expired entry, or NULL to just start from scratch in rbtree
 */
static inline struct cfq_rq *cfq_check_fifo(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	struct request *rq;
	struct cfq_rq *crq;

	if (cfq_cfqq_fifo_expire(cfqq))
		return NULL;

	if (!list_empty(&cfqq->fifo)) {
		int fifo = cfq_cfqq_class_sync(cfqq);

		crq = RQ_DATA(list_entry_fifo(cfqq->fifo.next));
		rq = crq->request;
		if (time_after(jiffies, rq->start_time + cfqd->cfq_fifo_expire[fifo])) {
			cfq_mark_cfqq_fifo_expire(cfqq);
			return crq;
		}
	}

	return NULL;
}

/*
 * Scale schedule slice based on io priority. Use the sync time slice only
 * if a queue is marked sync and has sync io queued. A sync queue with async
 * io only should not get the full sync slice length.
 */
static inline int
cfq_prio_to_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_slice = cfqd->cfq_slice[cfq_cfqq_sync(cfqq)];

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return base_slice + (base_slice/CFQ_SLICE_SCALE * (4 - cfqq->ioprio));
}
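
/*
 * Worked example (assuming the shipped defaults of cfq_slice_sync = HZ/10
 * and CFQ_SLICE_SCALE = 5, with HZ = 1000): base_slice for a sync queue is
 * 100 jiffies, so ioprio 0 gets 100 + 20 * 4 = 180 jiffies, ioprio 4 gets
 * exactly base_slice, and ioprio 7 gets 100 + 20 * (4 - 7) = 40 jiffies.
 */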

static inline void
cfq_set_prio_slice(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	cfqq->slice_end = cfq_prio_to_slice(cfqd, cfqq) + jiffies;
}

static inline int
cfq_prio_to_maxrq(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	const int base_rq = cfqd->cfq_slice_async_rq;

	WARN_ON(cfqq->ioprio >= IOPRIO_BE_NR);

	return 2 * (base_rq + base_rq * (CFQ_PRIO_LISTS - 1 - cfqq->ioprio));
}
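
/*
 * Worked example (assuming the shipped defaults of cfq_slice_async_rq = 2
 * and CFQ_PRIO_LISTS = 8): ioprio 0 may dispatch 2 * (2 + 2 * 7) = 32
 * requests per slice, ioprio 4 gets 2 * (2 + 2 * 3) = 16, and ioprio 7
 * gets the minimum of 2 * 2 = 4.
 */
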
/*
 * get next queue for service
 */
static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd, int force)
{
	unsigned long now = jiffies;
	struct cfq_queue *cfqq;

	cfqq = cfqd->active_queue;
	if (!cfqq)
		goto new_queue;

	if (cfq_cfqq_expired(cfqq))
		goto new_queue;

	if (!cfq_cfqq_must_dispatch(cfqq) && time_after(now, cfqq->slice_end))
		goto expire;

	/*
	 * if queue has requests, dispatch one. if not, check if
	 * enough slice is left to wait for one
	 */
	if (!RB_EMPTY(&cfqq->sort_list))
		goto keep_queue;
	else if (!force && cfq_cfqq_class_sync(cfqq) &&
		 time_before(now, cfqq->slice_end)) {
		if (cfq_arm_slice_timer(cfqd, cfqq))
			return NULL;
	}

expire:
	cfq_slice_expired(cfqd, 0);
new_queue:
	cfqq = cfq_set_active_queue(cfqd);
keep_queue:
	return cfqq;
}

static int
__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
			int max_dispatch)
{
	int dispatched = 0;

	BUG_ON(RB_EMPTY(&cfqq->sort_list));

	do {
		struct cfq_rq *crq;

		/*
		 * follow expired path, else get first next available
		 */
		if ((crq = cfq_check_fifo(cfqq)) == NULL)
			crq = cfqq->next_crq;

		/*
		 * finally, insert request into driver dispatch list
		 */
		cfq_dispatch_sort(cfqd->queue, crq);

		cfqd->dispatch_slice++;
		dispatched++;

		if (!cfqd->active_cic) {
			atomic_inc(&crq->io_context->ioc->refcount);
			cfqd->active_cic = crq->io_context;
		}

		if (RB_EMPTY(&cfqq->sort_list))
			break;

	} while (dispatched < max_dispatch);

	/*
	 * if slice end isn't set yet, set it. if at least one request was
	 * sync, use the sync time slice value
	 */
	if (!cfqq->slice_end)
		cfq_set_prio_slice(cfqd, cfqq);

	/*
	 * expire an async queue immediately if it has used up its slice. idle
	 * queues always expire after 1 dispatch round.
	 */
	if ((!cfq_cfqq_sync(cfqq) &&
	    cfqd->dispatch_slice >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
	    cfq_class_idle(cfqq))
		cfq_slice_expired(cfqd, 0);

	return dispatched;
}

static int
cfq_dispatch_requests(request_queue_t *q, int max_dispatch, int force)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct cfq_queue *cfqq;

	if (!cfqd->busy_queues)
		return 0;

	cfqq = cfq_select_queue(cfqd, force);
	if (cfqq) {
		cfq_clear_cfqq_must_dispatch(cfqq);
		cfq_clear_cfqq_wait_request(cfqq);
		del_timer(&cfqd->idle_slice_timer);

		if (cfq_class_idle(cfqq))
			max_dispatch = 1;

		return __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
	}

	return 0;
}

static inline void cfq_account_dispatch(struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = crq->cfq_queue;
	struct cfq_data *cfqd = cfqq->cfqd;

	if (unlikely(!blk_fs_request(crq->request)))
		return;

	/*
	 * accounted bit is necessary since some drivers will call
	 * elv_next_request() many times for the same request (eg ide)
	 */
	if (cfq_crq_in_driver(crq))
		return;

	cfq_mark_crq_in_driver(crq);
	cfqd->rq_in_driver++;
}

static inline void
cfq_account_completion(struct cfq_queue *cfqq, struct cfq_rq *crq)
{
	struct cfq_data *cfqd = cfqq->cfqd;
	unsigned long now;

	if (!cfq_crq_in_driver(crq))
		return;

	now = jiffies;

	WARN_ON(!cfqd->rq_in_driver);
	cfqd->rq_in_driver--;

	if (!cfq_class_idle(cfqq))
		cfqd->last_end_request = now;

	if (!cfq_cfqq_dispatched(cfqq)) {
		if (cfq_cfqq_on_rr(cfqq)) {
			cfqq->service_last = now;
			cfq_resort_rr_list(cfqq, 0);
		}
		if (cfq_cfqq_expired(cfqq)) {
			__cfq_slice_expired(cfqd, cfqq, 0);
			cfq_schedule_dispatch(cfqd);
		}
	}
	if (cfq_crq_is_sync(crq))
		crq->io_context->last_end_request = now;
}

static struct request *cfq_next_request(request_queue_t *q)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;
	struct request *rq;

	if (!list_empty(&q->queue_head)) {
		struct cfq_rq *crq;
dispatch:
		rq = list_entry_rq(q->queue_head.next);

		crq = RQ_DATA(rq);
		if (crq) {
			struct cfq_queue *cfqq = crq->cfq_queue;

			/*
			 * if idle window is disabled, allow queue buildup
			 */
			if (!cfq_crq_in_driver(crq) &&
			    !cfq_cfqq_idle_window(cfqq) &&
			    cfqd->rq_in_driver >= cfqd->cfq_max_depth)
				return NULL;

			cfq_remove_merge_hints(q, crq);
			cfq_account_dispatch(crq);
		}

		return rq;
	}

	if (cfq_dispatch_requests(q, cfqd->cfq_quantum, 0))
		goto dispatch;

	return NULL;
}

/*
 * task holds one reference to the queue, dropped when task exits. each crq
 * in-flight on this queue also holds a reference, dropped when crq is freed.
 *
 * queue lock must be held here.
 */
static void cfq_put_queue(struct cfq_queue *cfqq)
{
	struct cfq_data *cfqd = cfqq->cfqd;

	BUG_ON(atomic_read(&cfqq->ref) <= 0);

	if (!atomic_dec_and_test(&cfqq->ref))
		return;

	BUG_ON(rb_first(&cfqq->sort_list));
	BUG_ON(cfqq->allocated[READ] + cfqq->allocated[WRITE]);
	BUG_ON(cfq_cfqq_on_rr(cfqq));

	if (unlikely(cfqd->active_queue == cfqq)) {
		__cfq_slice_expired(cfqd, cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_cfqd(cfqq->cfqd);

	/*
	 * it's on the empty list and still hashed
	 */
	list_del(&cfqq->cfq_list);
	hlist_del(&cfqq->cfq_hash);
	kmem_cache_free(cfq_pool, cfqq);
}
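
/*
 * cfq queues are hashed on (key, prio): key identifies the process doing
 * io and prio is the packed IOPRIO_PRIO_VALUE(class, level), so one task
 * can own several queues, one per io priority it has used
 */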

static inline struct cfq_queue *
__cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
		    const int hashval)
{
	struct hlist_head *hash_list = &cfqd->cfq_hash[hashval];
	struct hlist_node *entry, *next;

	hlist_for_each_safe(entry, next, hash_list) {
		struct cfq_queue *__cfqq = list_entry_qhash(entry);
		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);

		if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
			return __cfqq;
	}

	return NULL;
}

static struct cfq_queue *
cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned short prio)
{
	return __cfq_find_cfq_hash(cfqd, key, prio, hash_long(key, CFQ_QHASH_SHIFT));
}

static void cfq_free_io_context(struct cfq_io_context *cic)
{
	struct cfq_io_context *__cic;
	struct list_head *entry, *next;

	list_for_each_safe(entry, next, &cic->list) {
		__cic = list_entry(entry, struct cfq_io_context, list);
		kmem_cache_free(cfq_ioc_pool, __cic);
	}

	kmem_cache_free(cfq_ioc_pool, cic);
}

/*
 * Called with interrupts disabled
 */
static void cfq_exit_single_io_context(struct cfq_io_context *cic)
{
	struct cfq_data *cfqd = cic->cfqq->cfqd;
	request_queue_t *q = cfqd->queue;

	WARN_ON(!irqs_disabled());

	spin_lock(q->queue_lock);

	if (unlikely(cic->cfqq == cfqd->active_queue)) {
		__cfq_slice_expired(cfqd, cic->cfqq, 0);
		cfq_schedule_dispatch(cfqd);
	}

	cfq_put_queue(cic->cfqq);
	cic->cfqq = NULL;
	spin_unlock(q->queue_lock);
}

/*
 * Another task may update the task cic list, if it is doing a queue lookup
 * on its behalf. cfq_cic_lock excludes such concurrent updates
 */
static void cfq_exit_io_context(struct cfq_io_context *cic)
{
	struct cfq_io_context *__cic;
	struct list_head *entry;
	unsigned long flags;

	local_irq_save(flags);

	/*
	 * put the reference this task is holding to the various queues
	 */
	list_for_each(entry, &cic->list) {
		__cic = list_entry(entry, struct cfq_io_context, list);
		cfq_exit_single_io_context(__cic);
	}

	cfq_exit_single_io_context(cic);
	local_irq_restore(flags);
}

static struct cfq_io_context *
cfq_alloc_io_context(struct cfq_data *cfqd, int gfp_mask)
{
	struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);

	if (cic) {
		INIT_LIST_HEAD(&cic->list);
		cic->cfqq = NULL;
		cic->key = NULL;
		cic->last_end_request = jiffies;
		cic->ttime_total = 0;
		cic->ttime_samples = 0;
		cic->ttime_mean = 0;
		cic->dtor = cfq_free_io_context;
		cic->exit = cfq_exit_io_context;
	}

	return cic;
}

static void cfq_init_prio_data(struct cfq_queue *cfqq)
{
	struct task_struct *tsk = current;
	int ioprio_class;

	if (!cfq_cfqq_prio_changed(cfqq))
		return;

	ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio);
	switch (ioprio_class) {
		default:
			printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class);
		case IOPRIO_CLASS_NONE:
			/*
			 * no prio set, place us in the middle of the BE classes
			 */
			cfqq->ioprio = task_nice_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_RT:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_RT;
			break;
		case IOPRIO_CLASS_BE:
			cfqq->ioprio = task_ioprio(tsk);
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
			break;
		case IOPRIO_CLASS_IDLE:
			cfqq->ioprio_class = IOPRIO_CLASS_IDLE;
			cfqq->ioprio = 7;
			cfq_clear_cfqq_idle_window(cfqq);
			break;
	}

	/*
	 * keep track of original prio settings in case we have to temporarily
	 * elevate the priority of this queue
	 */
	cfqq->org_ioprio = cfqq->ioprio;
	cfqq->org_ioprio_class = cfqq->ioprio_class;

	if (cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);

	cfq_clear_cfqq_prio_changed(cfqq);
}

static inline void changed_ioprio(struct cfq_queue *cfqq)
{
	if (cfqq) {
		struct cfq_data *cfqd = cfqq->cfqd;

		spin_lock(cfqd->queue->queue_lock);
		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_init_prio_data(cfqq);
		spin_unlock(cfqd->queue->queue_lock);
	}
}

/*
 * callback from sys_ioprio_set, irqs are disabled
 */
static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
{
	struct cfq_io_context *cic = ioc->cic;

	changed_ioprio(cic->cfqq);

	list_for_each_entry(cic, &cic->list, list)
		changed_ioprio(cic->cfqq);

	return 0;
}

static struct cfq_queue *
cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
	      int gfp_mask)
{
	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
	struct cfq_queue *cfqq, *new_cfqq = NULL;

retry:
	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);

	if (!cfqq) {
		if (new_cfqq) {
			cfqq = new_cfqq;
			new_cfqq = NULL;
		} else if (gfp_mask & __GFP_WAIT) {
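			/*
			 * drop the queue lock for the blocking allocation and
			 * retry the lookup: another task may have added this
			 * cfqq to the hash while we slept
			 */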
			spin_unlock_irq(cfqd->queue->queue_lock);
			new_cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			spin_lock_irq(cfqd->queue->queue_lock);
			goto retry;
		} else {
			cfqq = kmem_cache_alloc(cfq_pool, gfp_mask);
			if (!cfqq)
				goto out;
		}

		memset(cfqq, 0, sizeof(*cfqq));

		INIT_HLIST_NODE(&cfqq->cfq_hash);
		INIT_LIST_HEAD(&cfqq->cfq_list);
		RB_CLEAR_ROOT(&cfqq->sort_list);
		INIT_LIST_HEAD(&cfqq->fifo);

		cfqq->key = key;
		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
		atomic_set(&cfqq->ref, 0);
		cfqq->cfqd = cfqd;
		atomic_inc(&cfqd->ref);
		cfqq->service_last = 0;
		/*
		 * set ->slice_left to allow preemption for a new process
		 */
		cfqq->slice_left = 2 * cfqd->cfq_slice_idle;
		cfq_mark_cfqq_idle_window(cfqq);
		cfq_mark_cfqq_prio_changed(cfqq);
		cfq_init_prio_data(cfqq);
	}

	if (new_cfqq)
		kmem_cache_free(cfq_pool, new_cfqq);

	atomic_inc(&cfqq->ref);
out:
	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
	return cfqq;
}

Linus Torvalds's avatar
Linus Torvalds committed
/*
 * Setup general io context and cfq io context. There can be several cfq
 * io contexts per general io context, if this process is doing io to more
 * than one device managed by cfq. Note that caller is holding a reference to
 * cfqq, so we don't need to worry about it disappearing
 */
static struct cfq_io_context *
cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, int gfp_mask)
{
	struct io_context *ioc = NULL;
	struct cfq_io_context *cic;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	ioc = get_io_context(gfp_mask);
	if (!ioc)
		return NULL;

	if ((cic = ioc->cic) == NULL) {
		cic = cfq_alloc_io_context(cfqd, gfp_mask);

		if (cic == NULL)
			goto err;

		/*
		 * manually increment generic io_context usage count, it
		 * cannot go away since we are already holding one ref to it
		 */
		ioc->cic = cic;
		ioc->set_ioprio = cfq_ioc_set_ioprio;
		cic->ioc = ioc;
		cic->key = cfqd;
		atomic_inc(&cfqd->ref);
	} else {
		struct cfq_io_context *__cic;

		/*
		 * the first cic on the list is actually the head itself
		 */
		if (cic->key == cfqd)
			goto out;

		/*
		 * cic exists, check if we already are there. linear search
		 * should be ok here, the list will usually not be more than
		 * 1 or a few entries long
		 */
		list_for_each_entry(__cic, &cic->list, list) {
			/*
			 * this process is already holding a reference to
			 * this queue, so no need to get one more
			 */
			if (__cic->key == cfqd) {
				cic = __cic;
				goto out;
			}
		}

		/*
		 * nope, process doesn't have a cic associated with this
		 * cfqq yet. get a new one and add to list
		 */
		__cic = cfq_alloc_io_context(cfqd, gfp_mask);
		if (__cic == NULL)
			goto err;

		__cic->ioc = ioc;
		__cic->key = cfqd;
		atomic_inc(&cfqd->ref);
		list_add(&__cic->list, &cic->list);
		cic = __cic;
	}

out:
	return cic;
err:
	put_io_context(ioc);
	return NULL;
}

static void
cfq_update_io_thinktime(struct cfq_data *cfqd, struct cfq_io_context *cic)
{
	unsigned long elapsed, ttime;

	/*
	 * if this context already has stuff queued, thinktime is from
	 * last queue not last end
	 */
#if 0
	if (time_after(cic->last_end_request, cic->last_queue))
		elapsed = jiffies - cic->last_end_request;
	else
		elapsed = jiffies - cic->last_queue;
#else
	elapsed = jiffies - cic->last_end_request;
#endif

	ttime = min(elapsed, 2UL * cfqd->cfq_slice_idle);
	cic->ttime_samples = (7*cic->ttime_samples + 256) / 8;
	cic->ttime_total = (7*cic->ttime_total + 256*ttime) / 8;
	cic->ttime_mean = (cic->ttime_total + 128) / cic->ttime_samples;
}
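
/*
 * the think time fields are 1/8-decay exponential averages kept in fixed
 * point: each call folds the new sample in with weight 1/8, ttime_samples
 * climbs towards 256, and sample_valid() below requires > 80, which is
 * reached after roughly three observed samples
 */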

#define sample_valid(samples)	((samples) > 80)

/*
 * Disable idle window if the process thinks too long or seeks so much that
 * it doesn't matter
 */
static void
cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		       struct cfq_io_context *cic)
{
	int enable_idle = cfq_cfqq_idle_window(cfqq);

	if (!cic->ioc->task || !cfqd->cfq_slice_idle)
		enable_idle = 0;
	else if (sample_valid(cic->ttime_samples)) {
		if (cic->ttime_mean > cfqd->cfq_slice_idle)
			enable_idle = 0;
		else
			enable_idle = 1;
	}

	if (enable_idle)
		cfq_mark_cfqq_idle_window(cfqq);
	else
		cfq_clear_cfqq_idle_window(cfqq);
}

/*
 * Check if new_cfqq should preempt the currently active queue. Return 0 if
 * not (or if we aren't sure); returning 1 causes a preempt.
 */
static int
cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
		   struct cfq_rq *crq)
{
	struct cfq_queue *cfqq = cfqd->active_queue;

	if (cfq_class_idle(new_cfqq))
		return 0;

	if (!cfqq)
		return 1;

	if (cfq_class_idle(cfqq))
		return 1;
	if (!cfq_cfqq_wait_request(new_cfqq))
		return 0;
	/*
	 * if it doesn't have slice left, forget it
	 */
	if (new_cfqq->slice_left < cfqd->cfq_slice_idle)
		return 0;
	if (cfq_crq_is_sync(crq) && !cfq_cfqq_sync(cfqq))
		return 1;

	return 0;
}

/*
 * cfqq preempts the active queue. if we allowed preempt with no slice left,
 * let it have half of its nominal slice.
 */
static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	struct cfq_queue *__cfqq, *next;

	list_for_each_entry_safe(__cfqq, next, &cfqd->cur_rr, cfq_list)
		cfq_resort_rr_list(__cfqq, 1);

	if (!cfqq->slice_left)
		cfqq->slice_left = cfq_prio_to_slice(cfqd, cfqq) / 2;

	cfqq->slice_end = cfqq->slice_left + jiffies;
	__cfq_slice_expired(cfqd, cfqq, 1);
	__cfq_set_active_queue(cfqd, cfqq);
}

/*
 * should really be a ll_rw_blk.c helper
 */
static void cfq_start_queueing(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
	request_queue_t *q = cfqd->queue;

	if (!blk_queue_plugged(q))
		q->request_fn(q);
	else
		__generic_unplug_device(q);
}

/*
 * Called when a new fs request (crq) is added (to cfqq). Check if there's
 * something we should do about it
 */
static void
cfq_crq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		 struct cfq_rq *crq)
{
	struct cfq_io_context *cic;

	cfqq->next_crq = cfq_choose_req(cfqd, cfqq->next_crq, crq);

	/*
	 * we never wait for an async request and we don't allow preemption
	 * of an async request. so just return early
	 */
	if (!cfq_crq_is_sync(crq))
		return;
	cic = crq->io_context;
	cfq_update_io_thinktime(cfqd, cic);
	cfq_update_idle_window(cfqd, cfqq, cic);

	cic->last_queue = jiffies;

	if (cfqq == cfqd->active_queue) {
		/*
		 * if we are waiting for a request for this queue, let it rip
		 * immediately and flag that we must not expire this queue
		 * just now
		 */
		if (cfq_cfqq_wait_request(cfqq)) {
			cfq_mark_cfqq_must_dispatch(cfqq);
			del_timer(&cfqd->idle_slice_timer);
			cfq_start_queueing(cfqd, cfqq);
		}
	} else if (cfq_should_preempt(cfqd, cfqq, crq)) {
		/*
		 * not the active queue - expire current slice if it is
		 * idle and has expired its mean thinktime or this new queue
		 * has some old slice time left and is of higher priority
		 */
		cfq_preempt_queue(cfqd, cfqq);
		cfq_mark_cfqq_must_dispatch(cfqq);
		cfq_start_queueing(cfqd, cfqq);
	}
}

static void cfq_enqueue(struct cfq_data *cfqd, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq = crq->cfq_queue;

	cfq_init_prio_data(cfqq);

	cfq_add_crq_rb(crq);

	list_add_tail(&rq->queuelist, &cfqq->fifo);

	if (rq_mergeable(rq)) {
		cfq_add_crq_hash(cfqd, crq);

		if (!cfqd->queue->last_merge)
			cfqd->queue->last_merge = rq;
	}

	cfq_crq_enqueued(cfqd, cfqq, crq);
}

static void
cfq_insert_request(request_queue_t *q, struct request *rq, int where)
{
	struct cfq_data *cfqd = q->elevator->elevator_data;

	switch (where) {
		case ELEVATOR_INSERT_BACK:
			while (cfq_dispatch_requests(q, INT_MAX, 1))
				;
			list_add_tail(&rq->queuelist, &q->queue_head);
			/*
			 * If we were idling with pending requests on
			 * inactive cfqqs, force dispatching will
			 * remove the idle timer and the queue won't
			 * be kicked by __make_request() afterward.
			 * Kick it here.
			 */
			cfq_schedule_dispatch(cfqd);
			break;
		case ELEVATOR_INSERT_FRONT:
			list_add(&rq->queuelist, &q->queue_head);
			break;
		case ELEVATOR_INSERT_SORT:
			BUG_ON(!blk_fs_request(rq));
			cfq_enqueue(cfqd, rq);
			break;
		default:
			printk("%s: bad insert point %d\n", __FUNCTION__,where);
			return;
	}
}

static void cfq_completed_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct cfq_queue *cfqq;

	if (unlikely(!blk_fs_request(rq)))
		return;

	cfqq = crq->cfq_queue;

	if (cfq_crq_in_flight(crq)) {
		const int sync = cfq_crq_is_sync(crq);

		WARN_ON(!cfqq->on_dispatch[sync]);
		cfqq->on_dispatch[sync]--;
	}

	cfq_account_completion(cfqq, crq);
}

static struct request *
cfq_former_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbprev = rb_prev(&crq->rb_node);

	if (rbprev)
		return rb_entry_crq(rbprev)->request;

	return NULL;
}

static struct request *
cfq_latter_request(request_queue_t *q, struct request *rq)
{
	struct cfq_rq *crq = RQ_DATA(rq);
	struct rb_node *rbnext = rb_next(&crq->rb_node);

	if (rbnext)
		return rb_entry_crq(rbnext)->request;

	return NULL;
}

/*
 * we temporarily boost lower priority queues if they are holding fs exclusive
 * resources. they are boosted to normal prio (CLASS_BE/4)
 */
static void cfq_prio_boost(struct cfq_queue *cfqq)
{
	const int ioprio_class = cfqq->ioprio_class;
	const int ioprio = cfqq->ioprio;

	if (has_fs_excl()) {
		/*
		 * boost idle prio on transactions that would lock out other
		 * users of the filesystem
		 */
		if (cfq_class_idle(cfqq))
			cfqq->ioprio_class = IOPRIO_CLASS_BE;
		if (cfqq->ioprio > IOPRIO_NORM)
			cfqq->ioprio = IOPRIO_NORM;
	} else {
		/*
		 * check if we need to unboost the queue
		 */
		if (cfqq->ioprio_class != cfqq->org_ioprio_class)
			cfqq->ioprio_class = cfqq->org_ioprio_class;
		if (cfqq->ioprio != cfqq->org_ioprio)
			cfqq->ioprio = cfqq->org_ioprio;
	}

	/*
	 * refile between round-robin lists if we moved the priority class
	 */
	if ((ioprio_class != cfqq->ioprio_class || ioprio != cfqq->ioprio) &&
	    cfq_cfqq_on_rr(cfqq))
		cfq_resort_rr_list(cfqq, 0);
}

static inline pid_t cfq_queue_pid(struct task_struct *task, int rw)
{
	if (rw == READ || process_sync(task))
		return task->pid;

	return CFQ_KEY_ASYNC;
}

static inline int
__cfq_may_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq,
		struct task_struct *task, int rw)
{
#if 1
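	/*
	 * simplified policy: grant one ELV_MQUEUE_MUST per slice while the
	 * queue is waiting or marked must_alloc; the disabled #else branch
	 * below keeps the older per-rw accounting for reference
	 */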
	if ((cfq_cfqq_wait_request(cfqq) || cfq_cfqq_must_alloc(cfqq)) &&
	    !cfq_cfqq_must_alloc_slice(cfqq)) {
		cfq_mark_cfqq_must_alloc_slice(cfqq);
		return ELV_MQUEUE_MUST;
	}

	return ELV_MQUEUE_MAY;
#else
	if (!cfqq || task->flags & PF_MEMALLOC)
		return ELV_MQUEUE_MAY;
	if (!cfqq->allocated[rw] || cfq_cfqq_must_alloc(cfqq)) {
		if (cfq_cfqq_wait_request(cfqq))
			return ELV_MQUEUE_MUST;

		/*
		 * only allow 1 ELV_MQUEUE_MUST per slice, otherwise we
		 * can quickly flood the queue with writes from a single task
		 */
		if (rw == READ || !cfq_cfqq_must_alloc_slice(cfqq)) {
			cfq_mark_cfqq_must_alloc_slice(cfqq);
			return ELV_MQUEUE_MUST;
		}

		return ELV_MQUEUE_MAY;
	}
	if (cfq_class_idle(cfqq))
		return ELV_MQUEUE_NO;
	if (cfqq->allocated[rw] >= cfqd->max_queued) {
		struct io_context *ioc = get_io_context(GFP_ATOMIC);
		int ret = ELV_MQUEUE_NO;

		if (ioc && ioc->nr_batch_requests)
			ret = ELV_MQUEUE_MAY;

		put_io_context(ioc);
		return ret;