static int cgroup_populate_dir(struct cgroup *cgrp, unsigned int subsys_mask);
static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
static const struct file_operations proc_cgroupstats_operations;

static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
			      char *buf)
{
	if (cft->ss && !(cft->flags & CFTYPE_NO_PREFIX) &&
	    !(cgrp->root->flags & CGRP_ROOT_NOPREFIX))
		snprintf(buf, CGROUP_FILE_NAME_MAX, "%s.%s",
			 cft->ss->name, cft->name);
	else
		strncpy(buf, cft->name, CGROUP_FILE_NAME_MAX);
	return buf;
}

/**
 * cgroup_file_mode - deduce file mode of a control file
 * @cft: the control file in question
 *
 * returns cft->mode if ->mode is not 0
 * returns S_IRUGO|S_IWUSR if it has both a read and a write handler
 * returns S_IRUGO if it has only a read handler
 * returns S_IWUSR if it has only a write handler
 */
static umode_t cgroup_file_mode(const struct cftype *cft)
{
	umode_t mode = 0;

	if (cft->mode)
		return cft->mode;

	if (cft->read_u64 || cft->read_s64 || cft->seq_show)
		mode |= S_IRUGO;

	if (cft->write_u64 || cft->write_s64 || cft->write)
		mode |= S_IWUSR;

	return mode;
}
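
/*
 * Illustrative sketch (not part of the original file): with ->mode left at
 * 0, cgroup_file_mode() derives the mode from which handlers a cftype
 * defines.  example_read()/example_write() are hypothetical handlers.
 *
 *	static struct cftype example_files[] = {
 *		{
 *			.name = "usage",
 *			.read_u64 = example_read,	// read only -> S_IRUGO (0444)
 *		},
 *		{
 *			.name = "limit",
 *			.read_u64 = example_read,
 *			.write_u64 = example_write,	// read-write -> S_IRUGO | S_IWUSR (0644)
 *		},
 *		{ }	// terminator
 *	};
 */
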
static void cgroup_free_fn(struct work_struct *work)
{
	struct cgroup *cgrp = container_of(work, struct cgroup, destroy_work);

	atomic_dec(&cgrp->root->nr_cgrps);
	cgroup_pidlist_destroy_all(cgrp);

	if (cgrp->parent) {
		/*
		 * We get a ref to the parent, and put the ref when this
		 * cgroup is being freed, so it's guaranteed that the
		 * parent won't be destroyed before its children.
		 */
		cgroup_put(cgrp->parent);
		kernfs_put(cgrp->kn);
		kfree(cgrp);
	} else {
		/*
		 * This is root cgroup's refcnt reaching zero, which
		 * indicates that the root should be released.
		 */
		cgroup_destroy_root(cgrp->root);
	}
}

static void cgroup_free_rcu(struct rcu_head *head)
{
	struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);

	INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
	queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
}

static void cgroup_get(struct cgroup *cgrp)
{
	WARN_ON_ONCE(cgroup_is_dead(cgrp));
	WARN_ON_ONCE(atomic_read(&cgrp->refcnt) <= 0);
	atomic_inc(&cgrp->refcnt);
}

static void cgroup_put(struct cgroup *cgrp)
{
	if (!atomic_dec_and_test(&cgrp->refcnt))
		return;
	if (WARN_ON_ONCE(cgrp->parent && !cgroup_is_dead(cgrp)))
		return;

	cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
	cgrp->id = -1;

	call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
}

static void cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
{
	char name[CGROUP_FILE_NAME_MAX];

	lockdep_assert_held(&cgroup_tree_mutex);
	kernfs_remove_by_name(cgrp->kn, cgroup_file_name(cgrp, cft, name));
}

/**
 * cgroup_clear_dir - remove subsys files in a cgroup directory
 * @cgrp: target cgroup
 * @subsys_mask: mask of the subsystem ids whose files should be removed
 */
static void cgroup_clear_dir(struct cgroup *cgrp, unsigned int subsys_mask)
{
	struct cgroup_subsys *ss;
	int i;

	for_each_subsys(ss, i) {
		struct cftype *cfts;

		if (!(subsys_mask & (1 << i)))
			continue;
		list_for_each_entry(cfts, &ss->cfts, node)
			cgroup_addrm_files(cgrp, cfts, false);
	}
}

static int rebind_subsystems(struct cgroup_root *dst_root, unsigned int ss_mask)
{
	struct cgroup_subsys *ss;
	int ssid, i, ret;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		if (!(ss_mask & (1 << ssid)))
			continue;

		/* if @ss has non-root csses attached to it, can't move */
		if (css_next_child(NULL, cgroup_css(&ss->root->cgrp, ss)))
			return -EBUSY;

		/* can't move between two non-dummy roots either */
		if (ss->root != &cgrp_dfl_root && dst_root != &cgrp_dfl_root)
			return -EBUSY;
	}

	ret = cgroup_populate_dir(&dst_root->cgrp, ss_mask);
	if (ret) {
		if (dst_root != &cgrp_dfl_root)
			return ret;

		/*
		 * Rebinding back to the default root is not allowed to
		 * fail.  Using both default and non-default roots should
		 * be rare.  Moving subsystems back and forth even more so.
		 * Just warn about it and continue.
		 */
		if (cgrp_dfl_root_visible) {
			pr_warn("failed to create files (%d) while rebinding 0x%x to default root\n",
				ret, ss_mask);
			pr_warn("you may retry by moving them to a different hierarchy and unbinding\n");
		}
	}

	/*
	 * Nothing can fail from this point on.  Remove files for the
	 * removed subsystems and rebind each subsystem.
	 */
	mutex_unlock(&cgroup_mutex);
	for_each_subsys(ss, ssid)
		if (ss_mask & (1 << ssid))
			cgroup_clear_dir(&ss->root->cgrp, 1 << ssid);
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, ssid) {
		struct cgroup_root *src_root;
		struct cgroup_subsys_state *css;
		struct css_set *cset;

		if (!(ss_mask & (1 << ssid)))
			continue;

		src_root = ss->root;
		css = cgroup_css(&src_root->cgrp, ss);

		WARN_ON(!css || cgroup_css(&dst_root->cgrp, ss));

		RCU_INIT_POINTER(src_root->cgrp.subsys[ssid], NULL);
		rcu_assign_pointer(dst_root->cgrp.subsys[ssid], css);
		ss->root = dst_root;
		css->cgroup = &dst_root->cgrp;

		down_write(&css_set_rwsem);
		hash_for_each(css_set_table, i, cset, hlist)
			list_move_tail(&cset->e_cset_node[ss->id],
				       &dst_root->cgrp.e_csets[ss->id]);
		up_write(&css_set_rwsem);

		src_root->subsys_mask &= ~(1 << ssid);
		src_root->cgrp.child_subsys_mask &= ~(1 << ssid);

		/* default hierarchy doesn't enable controllers by default */
		if (dst_root != &cgrp_dfl_root)
			dst_root->cgrp.child_subsys_mask |= 1 << ssid;

		if (ss->bind)
			ss->bind(css);
	}

	kernfs_activate(dst_root->cgrp.kn);
	return 0;
}

static int cgroup_show_options(struct seq_file *seq,
			       struct kernfs_root *kf_root)
{
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_subsys *ss;
	int ssid;

	for_each_subsys(ss, ssid)
		if (root->subsys_mask & (1 << ssid))
			seq_printf(seq, ",%s", ss->name);
	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR)
		seq_puts(seq, ",sane_behavior");
	if (root->flags & CGRP_ROOT_NOPREFIX)
		seq_puts(seq, ",noprefix");
	if (root->flags & CGRP_ROOT_XATTR)
		seq_puts(seq, ",xattr");

	spin_lock(&release_agent_path_lock);
	if (strlen(root->release_agent_path))
		seq_printf(seq, ",release_agent=%s", root->release_agent_path);
	spin_unlock(&release_agent_path_lock);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags))
		seq_puts(seq, ",clone_children");
	if (strlen(root->name))
		seq_printf(seq, ",name=%s", root->name);
	return 0;
}
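
/*
 * Illustrative sketch (not part of the original file): for a hierarchy
 * with cpu and cpuacct bound and a release agent configured, the options
 * emitted above surface in /proc/mounts roughly as:
 *
 *	cgroup /sys/fs/cgroup/cpu cgroup rw,cpu,cpuacct,release_agent=/sbin/agent 0 0
 *
 * (the rw and any generic VFS options come from the core, not from this
 * function; the mount path and agent path are hypothetical)
 */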

struct cgroup_sb_opts {
	unsigned int subsys_mask;
	unsigned int flags;
	char *release_agent;
	bool cpuset_clone_children;
	char *name;
	/* User explicitly requested empty subsystem */
	bool none;
};

static int parse_cgroupfs_options(char *data, struct cgroup_sb_opts *opts)
{
	char *token, *o = data;
	bool all_ss = false, one_ss = false;
	unsigned int mask = -1U;
	struct cgroup_subsys *ss;
	int i;

#ifdef CONFIG_CPUSETS
	mask = ~(1U << cpuset_cgrp_id);
#endif

	memset(opts, 0, sizeof(*opts));

	while ((token = strsep(&o, ",")) != NULL) {
		if (!*token)
			return -EINVAL;
		if (!strcmp(token, "none")) {
			/* Explicitly have no subsystems */
			opts->none = true;
			continue;
		}
		if (!strcmp(token, "all")) {
			/* Mutually exclusive option 'all' + subsystem name */
			if (one_ss)
				return -EINVAL;
			all_ss = true;
			continue;
		}
		if (!strcmp(token, "__DEVEL__sane_behavior")) {
			opts->flags |= CGRP_ROOT_SANE_BEHAVIOR;
			continue;
		}
		if (!strcmp(token, "noprefix")) {
			opts->flags |= CGRP_ROOT_NOPREFIX;
			continue;
		}
		if (!strcmp(token, "clone_children")) {
			opts->cpuset_clone_children = true;
		if (!strcmp(token, "xattr")) {
			opts->flags |= CGRP_ROOT_XATTR;
			continue;
		}
		if (!strncmp(token, "release_agent=", 14)) {
			/* Specifying two release agents is forbidden */
			if (opts->release_agent)
				return -EINVAL;
			opts->release_agent =
				kstrndup(token + 14, PATH_MAX - 1, GFP_KERNEL);
			if (!opts->release_agent)
				return -ENOMEM;
			continue;
		}
		if (!strncmp(token, "name=", 5)) {
			const char *name = token + 5;
			/* Can't specify an empty name */
			if (!strlen(name))
				return -EINVAL;
			/* Must match [\w.-]+ */
			for (i = 0; i < strlen(name); i++) {
				char c = name[i];
				if (isalnum(c))
					continue;
				if ((c == '.') || (c == '-') || (c == '_'))
					continue;
				return -EINVAL;
			}
			/* Specifying two names is forbidden */
			if (opts->name)
				return -EINVAL;
			opts->name = kstrndup(name,
					      MAX_CGROUP_ROOT_NAMELEN - 1,
					      GFP_KERNEL);
			if (!opts->name)
				return -ENOMEM;

			continue;
		}

		for_each_subsys(ss, i) {
			if (strcmp(token, ss->name))
				continue;
			if (ss->disabled)
				continue;

			/* Mutually exclusive option 'all' + subsystem name */
			if (all_ss)
				return -EINVAL;
			opts->subsys_mask |= (1 << i);
			one_ss = true;

			break;
		}
		if (i == CGROUP_SUBSYS_COUNT)
			return -ENOENT;
	}

	if (opts->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_warn("sane_behavior: this is still under development and its behaviors will change, proceed at your own risk\n");
		if ((opts->flags & (CGRP_ROOT_NOPREFIX | CGRP_ROOT_XATTR)) ||
		    opts->cpuset_clone_children || opts->release_agent ||
		    opts->name) {
			pr_err("sane_behavior: noprefix, xattr, clone_children, release_agent and name are not allowed\n");
	} else {
		/*
		 * If the 'all' option was specified select all the
		 * subsystems, otherwise if 'none', 'name=' and a subsystem
		 * name options were not specified, let's default to 'all'
		 */
		if (all_ss || (!one_ss && !opts->none && !opts->name))
			for_each_subsys(ss, i)
				if (!ss->disabled)
					opts->subsys_mask |= (1 << i);
		/*
		 * We either have to specify by name or by subsystems. (So
		 * all empty hierarchies must have a name).
		 */
		if (!opts->subsys_mask && !opts->name)
			return -EINVAL;
	}

	/*
	 * Option noprefix was introduced just for backward compatibility
	 * with the old cpuset, so we allow noprefix only if mounting just
	 * the cpuset subsystem.
	 */
	if ((opts->flags & CGRP_ROOT_NOPREFIX) && (opts->subsys_mask & mask))
		return -EINVAL;

	/* Can't specify "none" and some subsystems */
	if (opts->subsys_mask && opts->none)
		return -EINVAL;

	return 0;
}
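
/*
 * Illustrative mount invocations accepted by the parser above (not part
 * of the original file; paths are hypothetical):
 *
 *	mount -t cgroup -o cpu,cpuacct none /sys/fs/cgroup/cpu
 *	mount -t cgroup -o none,name=mygrp none /sys/fs/cgroup/mygrp
 *	mount -t cgroup -o noprefix,cpuset none /sys/fs/cgroup/cpuset
 *
 * The first binds two subsystems, the second creates a named hierarchy
 * with no subsystems ("none" plus "name="), and the third is the only
 * shape in which noprefix passes the mask check: cpuset alone.
 */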

static int cgroup_remount(struct kernfs_root *kf_root, int *flags, char *data)
{
	int ret = 0;
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	struct cgroup_sb_opts opts;
	unsigned int added_mask, removed_mask;

	if (root->flags & CGRP_ROOT_SANE_BEHAVIOR) {
		pr_err("sane_behavior: remount is not allowed\n");
		return -EINVAL;
	}

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* See what subsystems are wanted */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

	if (opts.subsys_mask != root->subsys_mask || opts.release_agent)
		pr_warn("option changes via remount are deprecated (pid=%d comm=%s)\n",
			task_tgid_nr(current), current->comm);
	added_mask = opts.subsys_mask & ~root->subsys_mask;
	removed_mask = root->subsys_mask & ~opts.subsys_mask;
	/* Don't allow flags or name to change at remount */
	if (((opts.flags ^ root->flags) & CGRP_ROOT_OPTION_MASK) ||
	    (opts.name && strcmp(opts.name, root->name))) {
		pr_err("option or name mismatch, new: 0x%x \"%s\", old: 0x%x \"%s\"\n",
		       opts.flags & CGRP_ROOT_OPTION_MASK, opts.name ?: "",
		       root->flags & CGRP_ROOT_OPTION_MASK, root->name);
		ret = -EINVAL;
		goto out_unlock;
	}

	/* remounting is not allowed for populated hierarchies */
	if (!list_empty(&root->cgrp.children)) {
		ret = -EBUSY;
		goto out_unlock;
	}

	ret = rebind_subsystems(root, added_mask);
	if (ret)
		goto out_unlock;

	rebind_subsystems(&cgrp_dfl_root, removed_mask);

	if (opts.release_agent) {
		spin_lock(&release_agent_path_lock);
		strcpy(root->release_agent_path, opts.release_agent);
		spin_unlock(&release_agent_path_lock);
	}
out_unlock:
	kfree(opts.release_agent);
	kfree(opts.name);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	return ret;
}
/*
 * To reduce the fork() overhead for systems that are not actually using
 * their cgroups capability, we don't maintain the lists running through
 * each css_set to its tasks until we see the list actually used - in other
 * words after the first mount.
 */
static bool use_task_css_set_links __read_mostly;
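
/*
 * Sketch of the consumer side (illustrative, not part of the original
 * file): once the switch above is thrown, fork must link the child into
 * its css_set's task list, roughly as cgroup_post_fork() does:
 *
 *	if (use_task_css_set_links) {
 *		struct css_set *cset;
 *
 *		down_write(&css_set_rwsem);
 *		cset = task_css_set(current);
 *		if (list_empty(&child->cg_list)) {
 *			rcu_assign_pointer(child->cgroups, cset);
 *			list_add(&child->cg_list, &cset->tasks);
 *			get_css_set(cset);
 *		}
 *		up_write(&css_set_rwsem);
 *	}
 */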

static void cgroup_enable_task_cg_lists(void)
{
	struct task_struct *p, *g;

	down_write(&css_set_rwsem);

	if (use_task_css_set_links)
		goto out_unlock;

	use_task_css_set_links = true;

	/*
	 * We need tasklist_lock because RCU is not safe against
	 * while_each_thread(). Besides, a forking task that has passed
	 * cgroup_post_fork() without seeing use_task_css_set_links = 1
	 * is not guaranteed to have its child immediately visible in the
	 * tasklist if we walk through it with RCU.
	 */
	read_lock(&tasklist_lock);
	do_each_thread(g, p) {
		WARN_ON_ONCE(!list_empty(&p->cg_list) ||
			     task_css_set(p) != &init_css_set);

		/*
		 * We should check if the process is exiting, otherwise
		 * it will race with cgroup_exit() in that the list
		 * entry won't be deleted though the process has exited.
		 * Do it while holding siglock so that we don't end up
		 * racing against cgroup_exit().
		 */
		spin_lock_irq(&p->sighand->siglock);
		if (!(p->flags & PF_EXITING)) {
			struct css_set *cset = task_css_set(p);

			list_add(&p->cg_list, &cset->tasks);
			get_css_set(cset);
		}
		spin_unlock_irq(&p->sighand->siglock);
	} while_each_thread(g, p);
	read_unlock(&tasklist_lock);
out_unlock:
	up_write(&css_set_rwsem);
}

static void init_cgroup_housekeeping(struct cgroup *cgrp)
{
	struct cgroup_subsys *ss;
	int ssid;

	atomic_set(&cgrp->refcnt, 1);
	INIT_LIST_HEAD(&cgrp->sibling);
	INIT_LIST_HEAD(&cgrp->children);
	INIT_LIST_HEAD(&cgrp->cset_links);
	INIT_LIST_HEAD(&cgrp->release_list);
	INIT_LIST_HEAD(&cgrp->pidlists);
	mutex_init(&cgrp->pidlist_mutex);
	cgrp->dummy_css.cgroup = cgrp;

	for_each_subsys(ss, ssid)
		INIT_LIST_HEAD(&cgrp->e_csets[ssid]);
}

static void init_cgroup_root(struct cgroup_root *root,
			     struct cgroup_sb_opts *opts)
{
	struct cgroup *cgrp = &root->cgrp;

	INIT_LIST_HEAD(&root->root_list);
	atomic_set(&root->nr_cgrps, 1);
	cgrp->root = root;
	init_cgroup_housekeeping(cgrp);
	idr_init(&root->cgroup_idr);

	root->flags = opts->flags;
	if (opts->release_agent)
		strcpy(root->release_agent_path, opts->release_agent);
	if (opts->name)
		strcpy(root->name, opts->name);
	if (opts->cpuset_clone_children)
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &root->cgrp.flags);
}

static int cgroup_setup_root(struct cgroup_root *root, unsigned int ss_mask)
{
	LIST_HEAD(tmp_links);
	struct cgroup *root_cgrp = &root->cgrp;
	struct css_set *cset;
	int i, ret;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	ret = cgroup_idr_alloc(&root->cgroup_idr, root_cgrp, 1, 2, GFP_NOWAIT);
	if (ret < 0)
		goto out;
	root_cgrp->id = ret;

	/*
	 * We're accessing css_set_count without locking css_set_rwsem here,
	 * but that's OK - it can only be increased by someone holding
	 * cgroup_lock, and that's us. The worst that can happen is that we
	 * have some link structures left over
	 */
	ret = allocate_cgrp_cset_links(css_set_count, &tmp_links);
	if (ret)
		goto out;

	ret = cgroup_init_root_id(root);
	if (ret)
		goto out;

	root->kf_root = kernfs_create_root(&cgroup_kf_syscall_ops,
					   KERNFS_ROOT_CREATE_DEACTIVATED,
					   root_cgrp);
	if (IS_ERR(root->kf_root)) {
		ret = PTR_ERR(root->kf_root);
		goto exit_root_id;
	}
	root_cgrp->kn = root->kf_root->kn;

	ret = cgroup_addrm_files(root_cgrp, cgroup_base_files, true);
	if (ret)
		goto destroy_root;

	ret = rebind_subsystems(root, ss_mask);
	if (ret)
		goto destroy_root;
	/*
	 * There must be no failure case after here, since rebinding takes
	 * care of subsystems' refcounts, which are explicitly dropped in
	 * the failure exit path.
	 */
	list_add(&root->root_list, &cgroup_roots);
	cgroup_root_count++;

	/*
	 * Link the root cgroup in this hierarchy into all the css_set
	 * objects.
	 */
	down_write(&css_set_rwsem);
	hash_for_each(css_set_table, i, cset, hlist)
		link_css_set(&tmp_links, cset, root_cgrp);
	up_write(&css_set_rwsem);

	BUG_ON(!list_empty(&root_cgrp->children));
	BUG_ON(atomic_read(&root->nr_cgrps) != 1);

	kernfs_activate(root_cgrp->kn);
	ret = 0;
	goto out;

destroy_root:
	kernfs_destroy_root(root->kf_root);
	root->kf_root = NULL;
exit_root_id:
	cgroup_exit_root_id(root);
out:
	free_cgrp_cset_links(&tmp_links);
	return ret;
}

static struct dentry *cgroup_mount(struct file_system_type *fs_type,
			 int flags, const char *unused_dev_name,
			 void *data)
{
	struct cgroup_root *root;
	struct cgroup_sb_opts opts;
	struct dentry *dentry;
	int ret;
	bool new_sb;

	/*
	 * The first time anyone tries to mount a cgroup, enable the list
	 * linking each css_set to its tasks and fix up all existing tasks.
	 */
	if (!use_task_css_set_links)
		cgroup_enable_task_cg_lists();
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/* First find the desired set of subsystems */
	ret = parse_cgroupfs_options(data, &opts);
	if (ret)
		goto out_unlock;

retry:
	/* look for a matching existing root */
	if (!opts.subsys_mask && !opts.none && !opts.name) {
		cgrp_dfl_root_visible = true;
		root = &cgrp_dfl_root;
		cgroup_get(&root->cgrp);
		ret = 0;
		goto out_unlock;
	}

	for_each_root(root) {
		bool name_match = false;

		if (root == &cgrp_dfl_root)
			continue;

		/*
		 * If we asked for a name then it must match.  Also, if
		 * name matches but subsys_mask doesn't, we should fail.
		 * Remember whether name matched.
		 */
		if (opts.name) {
			if (strcmp(opts.name, root->name))
				continue;
			name_match = true;
		}

		/*
		 * If we asked for subsystems (or explicitly for no
		 * subsystems) then they must match.
		 */
		if ((opts.subsys_mask || opts.none) &&
		    (opts.subsys_mask != root->subsys_mask)) {
			if (!name_match)
				continue;
			ret = -EBUSY;
			goto out_unlock;
		}

		if ((root->flags ^ opts.flags) & CGRP_ROOT_OPTION_MASK) {
			if ((root->flags | opts.flags) & CGRP_ROOT_SANE_BEHAVIOR) {
				pr_err("sane_behavior: new mount options should match the existing superblock\n");
				ret = -EINVAL;
				goto out_unlock;
			} else {
				pr_warn("new mount options do not match the existing superblock, will be ignored\n");
			}
		}

		/*
		 * A root's lifetime is governed by its root cgroup.  A zero
		 * ref indicates that the root is being destroyed.  Wait for
		 * destruction to complete so that the subsystems are free.
		 * We can use wait_queue for the wait but this path is
		 * super cold.  Let's just sleep for a bit and retry.
		 */
		if (!atomic_inc_not_zero(&root->cgrp.refcnt)) {
			mutex_unlock(&cgroup_mutex);
			mutex_unlock(&cgroup_tree_mutex);
			msleep(10);
			mutex_lock(&cgroup_tree_mutex);
			mutex_lock(&cgroup_mutex);
			goto retry;
		}

		ret = 0;
		goto out_unlock;
	}

	/*
	 * No such thing, create a new one.  name= matching without subsys
	 * specification is allowed for already existing hierarchies but we
	 * can't create new one without subsys specification.
	 */
	if (!opts.subsys_mask && !opts.none) {
		ret = -EINVAL;
		goto out_unlock;
	}

	root = kzalloc(sizeof(*root), GFP_KERNEL);
	if (!root) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	init_cgroup_root(root, &opts);

	ret = cgroup_setup_root(root, opts.subsys_mask);
	if (ret)
		cgroup_free_root(root);

out_unlock:
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	kfree(opts.release_agent);
	kfree(opts.name);

	if (ret)
		return ERR_PTR(ret);

	dentry = kernfs_mount(fs_type, flags, root->kf_root, &new_sb);
	if (IS_ERR(dentry) || !new_sb)
		cgroup_put(&root->cgrp);
	return dentry;
}

static void cgroup_kill_sb(struct super_block *sb)
{
	struct kernfs_root *kf_root = kernfs_root_from_sb(sb);
	struct cgroup_root *root = cgroup_root_from_kf(kf_root);
	cgroup_put(&root->cgrp);
	kernfs_kill_sb(sb);
}

static struct file_system_type cgroup_fs_type = {
	.name = "cgroup",
	.mount = cgroup_mount,
	.kill_sb = cgroup_kill_sb,
};

static struct kobject *cgroup_kobj;

/**
 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
 * @task: target task
 * @buf: the buffer to write the path into
 * @buflen: the length of the buffer
 *
 * Determine @task's cgroup on the first (the one with the lowest non-zero
 * hierarchy_id) cgroup hierarchy and copy its path into @buf.  This
 * function grabs cgroup_mutex and shouldn't be used inside locks used by
 * cgroup controller callbacks.
 *
 * Return value is the same as kernfs_path().
 */
char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
{
	struct cgroup_root *root;
	struct cgroup *cgrp;
	int hierarchy_id = 1;
	char *path = NULL;

	mutex_lock(&cgroup_mutex);
	root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);

	if (root) {
		cgrp = task_cgroup_from_root(task, root);
		path = cgroup_path(cgrp, buf, buflen);
	} else {
		/* if no hierarchy exists, everyone is in "/" */
		if (strlcpy(buf, "/", buflen) < buflen)
			path = buf;
	}

	mutex_unlock(&cgroup_mutex);
	return path;
}
EXPORT_SYMBOL_GPL(task_cgroup_path);
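
/*
 * Illustrative usage (not part of the original file): logging the current
 * task's path on the first hierarchy from process context; the caller
 * owns the buffer and PATH_MAX is a convenient bound.
 *
 *	char *buf = kmalloc(PATH_MAX, GFP_KERNEL);
 *	if (buf) {
 *		char *path = task_cgroup_path(current, buf, PATH_MAX);
 *		if (path)
 *			pr_info("cgroup path: %s\n", path);
 *		kfree(buf);
 *	}
 */
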
/* used to track tasks and other necessary states during migration */
struct cgroup_taskset {
	/* the src and dst cset list running through cset->mg_node */
	struct list_head	src_csets;
	struct list_head	dst_csets;

	/*
	 * Fields for cgroup_taskset_*() iteration.
	 *
	 * Before migration is committed, the target migration tasks are on
	 * ->mg_tasks of the csets on ->src_csets.  After, on ->mg_tasks of
	 * the csets on ->dst_csets.  ->csets point to either ->src_csets
	 * or ->dst_csets depending on whether migration is committed.
	 *
	 * ->cur_cset and ->cur_task point to the current task position
	 * during iteration.
	 */
	struct list_head	*csets;
	struct css_set		*cur_cset;
	struct task_struct	*cur_task;
};

/**
 * cgroup_taskset_first - reset taskset and return the first task
 * @tset: taskset of interest
 *
 * @tset iteration is initialized and the first task is returned.
 */
struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset)
{
	tset->cur_cset = list_first_entry(tset->csets, struct css_set, mg_node);
	tset->cur_task = NULL;

	return cgroup_taskset_next(tset);
}

/**
 * cgroup_taskset_next - iterate to the next task in taskset
 * @tset: taskset of interest
 *
 * Return the next task in @tset.  Iteration must have been initialized
 * with cgroup_taskset_first().
 */
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset)
{
	struct css_set *cset = tset->cur_cset;
	struct task_struct *task = tset->cur_task;
	while (&cset->mg_node != tset->csets) {
		if (!task)
			task = list_first_entry(&cset->mg_tasks,
						struct task_struct, cg_list);
		else
			task = list_next_entry(task, cg_list);
		if (&task->cg_list != &cset->mg_tasks) {
			tset->cur_cset = cset;
			tset->cur_task = task;
			return task;
		}
		cset = list_next_entry(cset, mg_node);
		task = NULL;
	}

	return NULL;
}
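
/*
 * Illustrative usage (not part of the original file): controllers walk a
 * taskset with these iterators, usually via cgroup_taskset_for_each()
 * from include/linux/cgroup.h.  example_allowed() is hypothetical.
 *
 *	static int example_can_attach(struct cgroup_subsys_state *css,
 *				      struct cgroup_taskset *tset)
 *	{
 *		struct task_struct *task;
 *
 *		cgroup_taskset_for_each(task, tset)
 *			if (!example_allowed(task))
 *				return -EINVAL;
 *		return 0;
 *	}
 */
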
/**
 * cgroup_task_migrate - move a task from one cgroup to another.
 * @old_cgrp: the cgroup @tsk is being migrated from
 * @tsk: the task being migrated
 * @new_cset: the new css_set @tsk is being attached to
 *
 * Must be called with cgroup_mutex, threadgroup and css_set_rwsem locked.
 */
static void cgroup_task_migrate(struct cgroup *old_cgrp,
				struct task_struct *tsk,
				struct css_set *new_cset)
{
	struct css_set *old_cset;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	/*
	 * We are synchronized through threadgroup_lock() against PF_EXITING
	 * setting such that we can't race against cgroup_exit() changing the
	 * css_set to init_css_set and dropping the old one.
	 */
	WARN_ON_ONCE(tsk->flags & PF_EXITING);
	old_cset = task_css_set(tsk);

	get_css_set(new_cset);
	rcu_assign_pointer(tsk->cgroups, new_cset);

	/*
	 * Use move_tail so that cgroup_taskset_first() still returns the
	 * leader after migration.  This works because cgroup_migrate()
	 * ensures that the dst_cset of the leader is the first on the
	 * tset's dst_csets list.
	 */
	list_move_tail(&tsk->cg_list, &new_cset->mg_tasks);

	/*
	 * We just gained a reference on old_cset by taking it from the
	 * task. As trading it for new_cset is protected by cgroup_mutex,
	 * we're safe to drop it here; it will be freed under RCU.
	 */
	set_bit(CGRP_RELEASABLE, &old_cgrp->flags);
	put_css_set_locked(old_cset, false);
}

/**
 * cgroup_migrate_finish - cleanup after attach
 * @preloaded_csets: list of preloaded css_sets
 *
 * Undo cgroup_migrate_add_src() and cgroup_migrate_prepare_dst().  See
 * those functions for details.
 */
static void cgroup_migrate_finish(struct list_head *preloaded_csets)
{
	struct css_set *cset, *tmp_cset;

	lockdep_assert_held(&cgroup_mutex);

	down_write(&css_set_rwsem);
	list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
		cset->mg_src_cgrp = NULL;
		cset->mg_dst_cset = NULL;
		list_del_init(&cset->mg_preload_node);
		put_css_set_locked(cset, false);
	}
	up_write(&css_set_rwsem);
}

/**
 * cgroup_migrate_add_src - add a migration source css_set
 * @src_cset: the source css_set to add
 * @dst_cgrp: the destination cgroup
 * @preloaded_csets: list of preloaded css_sets
 *
 * Tasks belonging to @src_cset are about to be migrated to @dst_cgrp.  Pin
 * @src_cset and add it to @preloaded_csets, which should later be cleaned
 * up by cgroup_migrate_finish().
 *
 * This function may be called without holding threadgroup_lock even if the
 * target is a process.  Threads may be created and destroyed but as long
 * as cgroup_mutex is not dropped, no new css_set can be put into play and
 * the preloaded css_sets are guaranteed to cover all migrations.
 */
static void cgroup_migrate_add_src(struct css_set *src_cset,
				   struct cgroup *dst_cgrp,
				   struct list_head *preloaded_csets)
{
	struct cgroup *src_cgrp;

	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_rwsem);

	src_cgrp = cset_cgroup_from_root(src_cset, dst_cgrp->root);

	if (!list_empty(&src_cset->mg_preload_node))
		return;

	WARN_ON(src_cset->mg_src_cgrp);
	WARN_ON(!list_empty(&src_cset->mg_tasks));
	WARN_ON(!list_empty(&src_cset->mg_node));

	src_cset->mg_src_cgrp = src_cgrp;
	get_css_set(src_cset);
	list_add(&src_cset->mg_preload_node, preloaded_csets);
}
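
/*
 * Illustrative call sequence (not part of the original file): attach
 * paths such as cgroup_attach_task() use the migration helpers roughly
 * as follows; error handling is elided.
 *
 *	LIST_HEAD(preloaded_csets);
 *
 *	// pin the source css_sets
 *	down_write(&css_set_rwsem);
 *	cgroup_migrate_add_src(task_css_set(leader), dst_cgrp,
 *			       &preloaded_csets);
 *	up_write(&css_set_rwsem);
 *
 *	// prepare destinations, commit, then clean up
 *	cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
 *	cgroup_migrate(dst_cgrp, leader, threadgroup);
 *	cgroup_migrate_finish(&preloaded_csets);
 */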

/**
 * cgroup_migrate_prepare_dst - prepare destination css_sets for migration
 * @dst_cgrp: the destination cgroup (may be %NULL)
 * @preloaded_csets: list of preloaded source css_sets
 *
 * Tasks are about to be moved to @dst_cgrp and all the source css_sets
 * have been preloaded to @preloaded_csets.  This function looks up and
 * pins all destination css_sets, links each to its source, and appends them