/*
 * Real-Time Scheduling Class (mapped to the SCHED_FIFO and SCHED_RR
 * policies)
 */

#ifdef CONFIG_SMP
static inline int rt_overloaded(struct rq *rq)
{
	return atomic_read(&rq->rd->rto_count);
}

static inline void rt_set_overload(struct rq *rq)
{
	cpu_set(rq->cpu, rq->rd->rto_mask);
	/*
	 * Make sure the mask is visible before we set
	 * the overload count. That is checked to determine
	 * if we should look at the mask. It would be a shame
	 * if we looked at the mask, but the mask was not
	 * updated yet.
	 */
	wmb();
	atomic_inc(&rq->rd->rto_count);
}

static inline void rt_clear_overload(struct rq *rq)
{
	/* the order here really doesn't matter */
	atomic_dec(&rq->rd->rto_count);
	cpu_clear(rq->cpu, rq->rd->rto_mask);
}

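/*
 * A runqueue is considered "RT overloaded" when it has more than one
 * runnable RT task and at least one of them is allowed to migrate; the
 * CPU is then marked in rd->rto_mask so that other CPUs can find it when
 * looking for RT tasks to pull.
 */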
static void update_rt_migration(struct rq *rq)
{
	if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1)) {
		if (!rq->rt.overloaded) {
			rt_set_overload(rq);
			rq->rt.overloaded = 1;
		}
	} else if (rq->rt.overloaded) {
		rt_clear_overload(rq);
		rq->rt.overloaded = 0;
	}
}
#endif /* CONFIG_SMP */

static inline struct task_struct *rt_task_of(struct sched_rt_entity *rt_se)
{
	return container_of(rt_se, struct task_struct, rt);
}

static inline int on_rt_rq(struct sched_rt_entity *rt_se)
{
	return !list_empty(&rt_se->run_list);
}

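/*
 * With CONFIG_FAIR_GROUP_SCHED the RT entities form a hierarchy: each task
 * group has its own rt_rq, its rt_se is queued on the parent's rt_rq, and
 * the RT bandwidth ratio comes from the group.  Without group scheduling
 * every task maps directly onto the single per-runqueue rt_rq and the
 * global sysctl_sched_rt_ratio applies.
 */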
#ifdef CONFIG_FAIR_GROUP_SCHED

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
	if (!rt_rq->tg)
		return SCHED_RT_FRAC;

	return rt_rq->tg->rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	list_for_each_entry(rt_rq, &rq->leaf_rt_rq_list, leaf_rt_rq_list)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return rt_rq->rq;
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	return rt_se->rt_rq;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = rt_se->parent)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return rt_se->my_q;
}

static void enqueue_rt_entity(struct sched_rt_entity *rt_se);
static void dequeue_rt_entity(struct sched_rt_entity *rt_se);

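/*
 * Put a group's rt_se back on its parent rt_rq (e.g. when the group is no
 * longer throttled) and, if the group now holds a higher-priority task
 * than the one currently running, ask for a reschedule.
 */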
static void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) {
		struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;

		enqueue_rt_entity(rt_se);
		if (rt_rq->highest_prio < curr->prio)
			resched_task(curr);
	}
}

static void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
	struct sched_rt_entity *rt_se = rt_rq->rt_se;

	if (rt_se && on_rt_rq(rt_se))
		dequeue_rt_entity(rt_se);
}

#else

static inline unsigned int sched_rt_ratio(struct rt_rq *rt_rq)
{
	return sysctl_sched_rt_ratio;
}

#define for_each_leaf_rt_rq(rt_rq, rq) \
	for (rt_rq = &rq->rt; rt_rq; rt_rq = NULL)

static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq)
{
	return container_of(rt_rq, struct rq, rt);
}

static inline struct rt_rq *rt_rq_of_se(struct sched_rt_entity *rt_se)
{
	struct task_struct *p = rt_task_of(rt_se);
	struct rq *rq = task_rq(p);

	return &rq->rt;
}

#define for_each_sched_rt_entity(rt_se) \
	for (; rt_se; rt_se = NULL)

static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se)
{
	return NULL;
}

static inline void sched_rt_ratio_enqueue(struct rt_rq *rt_rq)
{
}

static inline void sched_rt_ratio_dequeue(struct rt_rq *rt_rq)
{
}

#endif

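/*
 * Effective priority of an RT entity: for a group entity it is the
 * priority of the best task queued below it (rt_rq->highest_prio), for a
 * plain task it is simply the task's own prio.
 */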
static inline int rt_se_prio(struct sched_rt_entity *rt_se)
{
#ifdef CONFIG_FAIR_GROUP_SCHED
	struct rt_rq *rt_rq = group_rt_rq(rt_se);

	if (rt_rq)
		return rt_rq->highest_prio;
#endif

	return rt_task_of(rt_se)->prio;
}

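/*
 * Check whether this rt_rq has used up its share of the RT period.  The
 * budget is period * ratio, with the ratio stored as a fixed-point
 * fraction of SCHED_RT_FRAC.  Purely as an illustration (hypothetical
 * numbers, not the kernel defaults): with a 1000 ms period and a ratio of
 * SCHED_RT_FRAC / 2, the budget works out to 500 ms of RT runtime per
 * period.  Once rt_time exceeds the budget the rt_rq is throttled and its
 * group entity dequeued.
 */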
static int sched_rt_ratio_exceeded(struct rt_rq *rt_rq)
{
	unsigned int rt_ratio = sched_rt_ratio(rt_rq);
	u64 period, ratio;

	if (rt_ratio == SCHED_RT_FRAC)
		return 0;

	if (rt_rq->rt_throttled)
		return 1;

	period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
	ratio = (period * rt_ratio) >> SCHED_RT_FRAC_SHIFT;

	if (rt_rq->rt_time > ratio) {
		struct rq *rq = rq_of_rt_rq(rt_rq);

		rq->rt_throttled = 1;
		rt_rq->rt_throttled = 1;
		sched_rt_ratio_dequeue(rt_rq);
		return 1;
	}

	return 0;
}

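/*
 * Advance the RT period bookkeeping: for every full RT period that has
 * elapsed on the runqueue clock, push rq->rt_period_expire forward by one
 * period so the per-period accounting starts from a fresh window.
 */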
static void update_sched_rt_period(struct rq *rq)
{
	struct rt_rq *rt_rq;
	u64 period;

	while (rq->clock > rq->rt_period_expire) {
		period = (u64)sysctl_sched_rt_period * NSEC_PER_MSEC;
		rq->rt_period_expire += period;