/*
 * css destruction is a four-stage process.
 *
 * 1. Destruction starts.  Killing of the percpu_ref is initiated.
 *    Implemented in kill_css().
 *
 * 2. When the percpu_ref is confirmed to be visible as killed on all CPUs
 *    and thus css_tryget() is guaranteed to fail, the css can be offlined
 *    by invoking offline_css().  After offlining, the base ref is put.
 *    Implemented in css_killed_work_fn().
 *
 * 3. When the percpu_ref reaches zero, the only possible remaining
 *    accessors are inside RCU read sections.  css_release() schedules the
 *    RCU callback.
 *
 * 4. After the grace period, the css can be freed.  Implemented in
 *    css_free_work_fn().
 *
 * It is actually hairier because both steps 2 and 4 require process context
 * and thus involve punting to css->destroy_work, adding two additional
 * steps to the already complex sequence.
 */
static void css_free_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;
	if (css->parent)
		css_put(css->parent);

	css->ss->css_free(css);
	cgroup_dput(cgrp);
}

static void css_free_rcu_fn(struct rcu_head *rcu_head)
{
	struct cgroup_subsys_state *css =
		container_of(rcu_head, struct cgroup_subsys_state, rcu_head);
	/*
	 * css holds an extra ref to @cgrp->dentry which is put on the last
	 * css_put().  dput() requires process context which we don't have.
	 */
	INIT_WORK(&css->destroy_work, css_free_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

static void css_release(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

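	/*
	 * The refcnt has hit zero; clear the subsys pointer and defer the
	 * actual freeing past an RCU grace period (stages 3-4 above).
	 */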
	rcu_assign_pointer(css->cgroup->subsys[css->ss->id], NULL);
	call_rcu(&css->rcu_head, css_free_rcu_fn);
}

static void init_css(struct cgroup_subsys_state *css, struct cgroup_subsys *ss,
		     struct cgroup *cgrp)
{
	css->cgroup = cgrp;
	css->ss = ss;

	if (cgrp->parent)
		css->parent = cgroup_css(cgrp->parent, ss);
}

/* invoke ->css_online() on a new CSS and mark it online if successful */
static int online_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;
	int ret = 0;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (ss->css_online)
		ret = ss->css_online(css);
	if (!ret) {
		css->flags |= CSS_ONLINE;
		css->cgroup->nr_css++;
		rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
	}
	return ret;
}

/* if the CSS is online, invoke ->css_offline() on it and mark it offline */
static void offline_css(struct cgroup_subsys_state *css)
{
	struct cgroup_subsys *ss = css->ss;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	if (!(css->flags & CSS_ONLINE))
		return;

	if (ss->css_offline)
		ss->css_offline(css);

	css->flags &= ~CSS_ONLINE;
	css->cgroup->nr_css--;
	RCU_INIT_POINTER(css->cgroup->subsys[ss->id], css);
}

/**
 * create_css - create a cgroup_subsys_state
 * @cgrp: the cgroup new css will be associated with
 * @ss: the subsys of new css
 *
 * Create a new css associated with @cgrp - @ss pair.  On success, the new
 * css is online and installed in @cgrp with all interface files created.
 * Returns 0 on success, -errno on failure.
 */
static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
{
	struct cgroup *parent = cgrp->parent;
	struct cgroup_subsys_state *css;
	int err;

	lockdep_assert_held(&cgrp->dentry->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_mutex);

	css = ss->css_alloc(cgroup_css(parent, ss));
	if (IS_ERR(css))
		return PTR_ERR(css);

	err = percpu_ref_init(&css->refcnt, css_release);
	if (err)
		goto err_free;

	init_css(css, ss, cgrp);

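	/* create the subsystem's interface files in @cgrp's directory */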
	err = cgroup_populate_dir(cgrp, 1 << ss->id);
	if (err)
		goto err_free;

	err = online_css(css);
	if (err)
		goto err_free;

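	/*
	 * Each css pins its cgroup and its parent css; both references
	 * are dropped from the css_free path (see css_free_work_fn()).
	 */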
	cgroup_get(cgrp);
	css_get(css->parent);

	if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
	    parent->parent) {
		pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
			   current->comm, current->pid, ss->name);
		if (!strcmp(ss->name, "memory"))
			pr_warning("cgroup: \"memory\" requires setting use_hierarchy to 1 on the root.\n");
		ss->warned_broken_hierarchy = true;
	}

	return 0;

err_free:
	percpu_ref_cancel_init(&css->refcnt);
	ss->css_free(css);
	return err;
}

/**
 * cgroup_create - create a cgroup
 * @parent: cgroup that will be parent of the new cgroup
 * @dentry: dentry of the new cgroup
 * @mode: mode to set on new inode
 *
 * Must be called with the mutex on the parent inode held
 */
static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
			     umode_t mode)
{
	struct cgroup *cgrp;
	struct cgroup_name *name;
	struct cgroupfs_root *root = parent->root;
	struct cgroup_subsys *ss;
	struct super_block *sb = root->sb;
	int ssid, err;

	/* allocate the cgroup and its ID, 0 is reserved for the root */
	cgrp = kzalloc(sizeof(*cgrp), GFP_KERNEL);
	if (!cgrp)
		return -ENOMEM;

	name = cgroup_alloc_name(dentry->d_name.name);
	if (!name) {
		err = -ENOMEM;
		goto err_free_cgrp;
	}
	rcu_assign_pointer(cgrp->name, name);

	mutex_lock(&cgroup_tree_mutex);

	/*
	 * Only live parents can have children.  Note that the liveliness
	 * check isn't strictly necessary because cgroup_mkdir() and
	 * cgroup_rmdir() are fully synchronized by i_mutex; however, do it
	 * anyway so that locking is contained inside cgroup proper and we
	 * don't get nasty surprises if we ever grow another caller.
	 */
	if (!cgroup_lock_live_group(parent)) {
		err = -ENODEV;
		goto err_unlock_tree;
	}

	/*
	 * Temporarily set the pointer to NULL, so idr_find() won't return
	 * a half-baked cgroup.
	 */
	cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
	if (cgrp->id < 0) {
		err = -ENOMEM;
		goto err_unlock;
	}

	/* Grab a reference on the superblock so the hierarchy doesn't
	 * get deleted on unmount if there are child cgroups.  This
	 * can be done outside cgroup_mutex, since the sb can't
	 * disappear while someone has an open control file on the
	 * fs */
	cgroup_get_root(root);
	init_cgroup_housekeeping(cgrp);
	dentry->d_fsdata = cgrp;
	cgrp->dentry = dentry;

	cgrp->parent = parent;
	cgrp->dummy_css.parent = &parent->dummy_css;

	if (notify_on_release(parent))
		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);

	if (test_bit(CGRP_CPUSET_CLONE_CHILDREN, &parent->flags))
		set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
	/*
	 * Create directory.  cgroup_create_file() returns with the new
	 * directory locked on success so that it can be populated without
	 * dropping cgroup_mutex.
	 */
	err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
	if (err < 0)
		goto err_free_id;
	lockdep_assert_held(&dentry->d_inode->i_mutex);

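	/*
	 * Assign a monotonically increasing serial number; sibling
	 * iteration (css_next_child()) relies on this creation order.
	 */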
	cgrp->serial_nr = cgroup_serial_nr_next++;
	/* allocation complete, commit to creation */
	list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
	root->number_of_cgroups++;
	/* hold a ref to the parent's dentry */
	cgroup_get(parent);
	/*
	 * @cgrp is now fully operational.  If something fails after this
	 * point, it'll be released via the normal destruction path.
	 */
	idr_replace(&root->cgroup_idr, cgrp, cgrp->id);

	err = cgroup_addrm_files(cgrp, cgroup_base_files, true);
	if (err)
		goto err_destroy;

	/* let's create and online css's */
	for_each_subsys(ss, ssid) {
		if (root->subsys_mask & (1 << ssid)) {
			err = create_css(cgrp, ss);
			if (err)
				goto err_destroy;
		}
	}

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	mutex_unlock(&cgrp->dentry->d_inode->i_mutex);

	return 0;

err_free_id:
	idr_remove(&root->cgroup_idr, cgrp->id);
	/* Release the reference count that we took on the superblock */
	cgroup_put_root(root);
err_unlock:
	mutex_unlock(&cgroup_mutex);
err_unlock_tree:
	mutex_unlock(&cgroup_tree_mutex);
	kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
	kfree(cgrp);
	return err;

err_destroy:
	cgroup_destroy_locked(cgrp);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
	mutex_unlock(&dentry->d_inode->i_mutex);
	return err;
}

static int cgroup_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct cgroup *c_parent = dentry->d_parent->d_fsdata;

	/* the vfs holds inode->i_mutex already */
	return cgroup_create(c_parent, dentry, mode | S_IFDIR);
}

/*
 * This is called when the refcnt of a css is confirmed to be killed.
 * css_tryget() is now guaranteed to fail.
 */
static void css_killed_work_fn(struct work_struct *work)
{
	struct cgroup_subsys_state *css =
		container_of(work, struct cgroup_subsys_state, destroy_work);
	struct cgroup *cgrp = css->cgroup;
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	/*
	 * css_tryget() is guaranteed to fail now.  Tell subsystems to
	 * initate destruction.
	 */
	offline_css(css);

	/*
	 * If @cgrp is marked dead, it's waiting for refs of all css's to
	 * be disabled before proceeding to the second phase of cgroup
	 * destruction.  If we are the last one, kick it off.
	 */
	if (!cgrp->nr_css && cgroup_is_dead(cgrp))
		cgroup_destroy_css_killed(cgrp);

	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	/*
	 * Put the css refs from kill_css().  Each css holds an extra
	 * reference to the cgroup's dentry and cgroup removal proceeds
	 * regardless of css refs.  On the last put of each css, whenever
	 * that may be, the extra dentry ref is put so that dentry
	 * destruction happens only after all css's are released.
	 */
	css_put(css);
}

/* css kill confirmation processing requires process context, bounce */
static void css_killed_ref_fn(struct percpu_ref *ref)
{
	struct cgroup_subsys_state *css =
		container_of(ref, struct cgroup_subsys_state, refcnt);

	INIT_WORK(&css->destroy_work, css_killed_work_fn);
	queue_work(cgroup_destroy_wq, &css->destroy_work);
}

/**
 * kill_css - destroy a css
 * @css: css to destroy
 *
 * This function initiates destruction of @css by removing cgroup interface
 * files and putting its base reference.  ->css_offline() will be invoked
 * asynchronously once css_tryget() is guaranteed to fail and when the
 * reference count reaches zero, @css will be released.
 */
static void kill_css(struct cgroup_subsys_state *css)
{
	cgroup_clear_dir(css->cgroup, 1 << css->ss->id);
	/*
	 * Killing would put the base ref, but we need to keep it alive
	 * until after ->css_offline().
	 */
	css_get(css);

	/*
	 * cgroup core guarantees that, by the time ->css_offline() is
	 * invoked, no new css reference will be given out via
	 * css_tryget().  We can't simply call percpu_ref_kill() and
	 * proceed to offlining css's because percpu_ref_kill() doesn't
	 * guarantee that the ref is seen as killed on all CPUs on return.
	 *
	 * Use percpu_ref_kill_and_confirm() to get notifications as each
	 * css is confirmed to be seen as killed on all CPUs.
	 */
	percpu_ref_kill_and_confirm(&css->refcnt, css_killed_ref_fn);
}

/**
 * cgroup_destroy_locked - the first stage of cgroup destruction
 * @cgrp: cgroup to be destroyed
 *
 * css's make use of percpu refcnts whose killing latency shouldn't be
 * exposed to userland and are RCU protected.  Also, cgroup core needs to
 * guarantee that css_tryget() won't succeed by the time ->css_offline() is
 * invoked.  To satisfy all the requirements, destruction is implemented in
 * the following two steps.
 *
 * s1. Verify @cgrp can be destroyed and mark it dying.  Remove all
 *     userland visible parts and start killing the percpu refcnts of
 *     css's.  Set up so that the next stage will be kicked off once all
 *     the percpu refcnts are confirmed to be killed.
 *
 * s2. Invoke ->css_offline(), mark the cgroup dead and proceed with the
 *     rest of destruction.  Once all cgroup references are gone, the
 *     cgroup is RCU-freed.
 *
 * This function implements s1.  After this step, @cgrp is gone as far as
 * the userland is concerned and a new cgroup with the same name may be
 * created.  As cgroup doesn't care about the names internally, this
 * doesn't cause any problem.
 */
static int cgroup_destroy_locked(struct cgroup *cgrp)
	__releases(&cgroup_mutex) __acquires(&cgroup_mutex)
{
	struct dentry *d = cgrp->dentry;
	struct cgroup_subsys_state *css;
	struct cgroup *child;
	bool empty;
	int ssid;

	lockdep_assert_held(&d->d_inode->i_mutex);
	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);

	/*
	 * css_set_lock synchronizes access to ->cset_links and prevents
	 * @cgrp from being removed while __put_css_set() is in progress.
	 */
	read_lock(&css_set_lock);
	empty = list_empty(&cgrp->cset_links);
	read_unlock(&css_set_lock);
	if (!empty)
		return -EBUSY;

	/*
	 * Make sure there are no live children.  We can't test ->children
	 * emptiness as dead children linger on it while being destroyed;
	 * otherwise, "rmdir parent/child parent" may fail with -EBUSY.
	 */
	empty = true;
	rcu_read_lock();
	list_for_each_entry_rcu(child, &cgrp->children, sibling) {
		empty = cgroup_is_dead(child);
		if (!empty)
			break;
	}
	rcu_read_unlock();
	if (!empty)
		return -EBUSY;

	/*
	 * Initiate massacre of all css's.  cgroup_destroy_css_killed()
	 * will be invoked to perform the rest of destruction once the
	 * percpu refs of all css's are confirmed to be killed.  This
	 * involves removing the subsystem's files, so drop cgroup_mutex.
	 */
	mutex_unlock(&cgroup_mutex);
	for_each_css(css, ssid, cgrp)
		kill_css(css);
	mutex_lock(&cgroup_mutex);

	/*
	 * Mark @cgrp dead.  This prevents further task migration and child
	 * creation by disabling cgroup_lock_live_group().  Note that
	 * CGRP_DEAD assertion is depended upon by css_next_child() to
	 * resume iteration after dropping RCU read lock.  See
	 * css_next_child() for details.
	 */
	set_bit(CGRP_DEAD, &cgrp->flags);

	/* CGRP_DEAD is set, remove from ->release_list for the last time */
	raw_spin_lock(&release_list_lock);
	if (!list_empty(&cgrp->release_list))
		list_del_init(&cgrp->release_list);
	raw_spin_unlock(&release_list_lock);

	/*
	 * If @cgrp has css's attached, the second stage of cgroup
	 * destruction is kicked off from css_killed_work_fn() after the
	 * refs of all attached css's are killed.  If @cgrp doesn't have
	 * any css, we kick it off here.
	 */
	if (!cgrp->nr_css)
		cgroup_destroy_css_killed(cgrp);

	/*
	 * Clear the base files and remove @cgrp directory.  The removal
	 * puts the base ref but we aren't quite done with @cgrp yet, so
	 * hold onto it.
	 */
	mutex_unlock(&cgroup_mutex);
	cgroup_addrm_files(cgrp, cgroup_base_files, false);
	dget(d);
	cgroup_d_remove_dir(d);
	mutex_lock(&cgroup_mutex);

	return 0;
}

/**
 * cgroup_destroy_css_killed - the second step of cgroup destruction
 * @cgrp: the cgroup whose css's have been killed
 *
 * This function is invoked from a work item for a cgroup which is being
 * destroyed after all css's are offlined and performs the rest of
 * destruction.  This is the second step of destruction described in the
 * comment above cgroup_destroy_locked().
 */
static void cgroup_destroy_css_killed(struct cgroup *cgrp)
{
	struct cgroup *parent = cgrp->parent;

	lockdep_assert_held(&cgroup_tree_mutex);
	lockdep_assert_held(&cgroup_mutex);
	/* delete this cgroup from parent->children */
	list_del_rcu(&cgrp->sibling);
	cgroup_put(cgrp);
	set_bit(CGRP_RELEASABLE, &parent->flags);
	check_for_release(parent);
}

static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
{
	int ret;

	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);
	ret = cgroup_destroy_locked(dentry->d_fsdata);
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);

	return ret;
}

static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
{
	struct cgroup_subsys_state *css;

	printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name);
	mutex_lock(&cgroup_tree_mutex);
	mutex_lock(&cgroup_mutex);

	INIT_LIST_HEAD(&ss->cftsets);
	/* Create the top cgroup state for this subsystem */
	ss->root = &cgroup_dummy_root;
	css = ss->css_alloc(cgroup_css(cgroup_dummy_top, ss));
	/* We don't handle early failures gracefully */
	BUG_ON(IS_ERR(css));
	init_css(css, ss, cgroup_dummy_top);
	/* Update the init_css_set to contain a subsys
	 * pointer to this state - since the subsystem is
	 * newly registered, all tasks and hence the
	 * init_css_set are in the subsystem's top cgroup. */
	init_css_set.subsys[ss->id] = css;

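	/*
	 * Record whether any subsystem wants fork/exit callbacks so that
	 * cgroup_post_fork() and cgroup_exit() can skip the subsys loop
	 * when none do.
	 */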
	need_forkexit_callback |= ss->fork || ss->exit;

	/* At system boot, before all subsystems have been
	 * registered, no tasks have been forked, so we don't
	 * need to invoke fork callbacks here. */
	BUG_ON(!list_empty(&init_task.tasks));

	BUG_ON(online_css(css));
	mutex_unlock(&cgroup_mutex);
	mutex_unlock(&cgroup_tree_mutex);
}

/**
 * cgroup_init_early - cgroup initialization at system boot
 *
 * Initialize cgroups at system boot, and initialize any
 * subsystems that request early init.
 */
int __init cgroup_init_early(void)
{
	struct cgroup_subsys *ss;
	int i;

	atomic_set(&init_css_set.refcount, 1);
	INIT_LIST_HEAD(&init_css_set.cgrp_links);
	INIT_LIST_HEAD(&init_css_set.tasks);
	INIT_HLIST_NODE(&init_css_set.hlist);
	init_cgroup_root(&cgroup_dummy_root);
	cgroup_root_count = 1;
	RCU_INIT_POINTER(init_task.cgroups, &init_css_set);
	init_cgrp_cset_link.cset = &init_css_set;
	init_cgrp_cset_link.cgrp = cgroup_dummy_top;
	list_add(&init_cgrp_cset_link.cset_link, &cgroup_dummy_top->cset_links);
	list_add(&init_cgrp_cset_link.cgrp_link, &init_css_set.cgrp_links);
	for_each_subsys(ss, i) {
		WARN(!ss->css_alloc || !ss->css_free || ss->name || ss->id,
		     "invalid cgroup_subsys %d:%s css_alloc=%p css_free=%p name:id=%d:%s\n",
		     i, cgroup_subsys_name[i], ss->css_alloc, ss->css_free,
		     ss->id, ss->name);
		WARN(strlen(cgroup_subsys_name[i]) > MAX_CGROUP_TYPE_NAMELEN,
		     "cgroup_subsys_name %s too long\n", cgroup_subsys_name[i]);

		ss->id = i;
		ss->name = cgroup_subsys_name[i];

		if (ss->early_init)
			cgroup_init_subsys(ss);
	}
	return 0;
}

/**
 * cgroup_init - cgroup initialization
 *
 * Register cgroup filesystem and /proc file, and initialize
 * any subsystems that didn't request early init.
 */
int __init cgroup_init(void)
{
	struct cgroup_subsys *ss;
	unsigned long key;
	int i, err;

	err = bdi_init(&cgroup_backing_dev_info);
	if (err)
		return err;
	cgroup_init_cftypes(NULL, cgroup_base_files);

	for_each_subsys(ss, i) {
		if (!ss->early_init)
			cgroup_init_subsys(ss);

		/*
		 * cftype registration needs kmalloc and can't be done
		 * during early_init.  Register base cftypes separately.
		 */
		if (ss->base_cftypes)
			WARN_ON(cgroup_add_cftypes(ss, ss->base_cftypes));
	}

	/* allocate id for the dummy hierarchy */
	mutex_lock(&cgroup_mutex);

	/* Add init_css_set to the hash table */
	key = css_set_hash(init_css_set.subsys);
	hash_add(css_set_table, &init_css_set.hlist, key);

	BUG_ON(cgroup_init_root_id(&cgroup_dummy_root, 0, 1));
	err = idr_alloc(&cgroup_dummy_root.cgroup_idr, cgroup_dummy_top,
			0, 1, GFP_KERNEL);
	BUG_ON(err < 0);

	mutex_unlock(&cgroup_mutex);

	cgroup_kobj = kobject_create_and_add("cgroup", fs_kobj);
	if (!cgroup_kobj) {
		err = -ENOMEM;
		goto out;
	}

	err = register_filesystem(&cgroup_fs_type);
	if (err < 0) {
		kobject_put(cgroup_kobj);
		goto out;
	}

	proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations);

out:
	if (err)
		bdi_destroy(&cgroup_backing_dev_info);

	return err;
}

static int __init cgroup_wq_init(void)
{
	/*
	 * There isn't much point in executing destruction path in
	 * parallel.  Good chunk is serialized with cgroup_mutex anyway.
	 *
	 * XXX: Must be ordered to make sure parent is offlined after
	 * children.  The ordering requirement is for memcg where a
	 * parent's offline may wait for a child's, leading to deadlock.  In
	 * the long term, this should be fixed from memcg side.
	 *
	 * We would prefer to do this in cgroup_init() above, but that
	 * is called before init_workqueues(): so leave this until after.
	 */
	cgroup_destroy_wq = alloc_ordered_workqueue("cgroup_destroy", 0);
	BUG_ON(!cgroup_destroy_wq);

	/*
	 * Used to destroy pidlists and separate to serve as flush domain.
	 * Cap @max_active to 1 too.
	 */
	cgroup_pidlist_destroy_wq = alloc_workqueue("cgroup_pidlist_destroy",
						    0, 1);
	BUG_ON(!cgroup_pidlist_destroy_wq);

	return 0;
}
core_initcall(cgroup_wq_init);

/*
 * proc_cgroup_show()
 *  - Print task's cgroup paths into seq_file, one line for each hierarchy
 *  - Used for /proc/<pid>/cgroup.
 *  - No need to task_lock(tsk) on this tsk->cgroup reference, as it
 *    doesn't really matter if tsk->cgroup changes after we read it,
 *    and we take cgroup_mutex, keeping cgroup_attach_task() from changing it
 *    anyway.  No need to check that tsk->cgroup != NULL, thanks to
 *    the_top_cgroup_hack in cgroup_exit(), which sets an exiting task's
 *    cgroup to top_cgroup.
 */

/* TODO: Use a proper seq_file iterator */
int proc_cgroup_show(struct seq_file *m, void *v)
{
	struct pid *pid;
	struct task_struct *tsk;
	char *buf;
	int retval;
	struct cgroupfs_root *root;

	retval = -ENOMEM;
	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		goto out;

	retval = -ESRCH;
	pid = m->private;
	tsk = get_pid_task(pid, PIDTYPE_PID);
	if (!tsk)
		goto out_free;

	retval = 0;

	mutex_lock(&cgroup_mutex);

	for_each_active_root(root) {
		struct cgroup_subsys *ss;
		struct cgroup *cgrp;
		int ssid, count = 0;

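		/* each line: hierarchy-id:subsys1,subsys2[,name=...]:/path/of/cgroup */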
		seq_printf(m, "%d:", root->hierarchy_id);
		for_each_subsys(ss, ssid)
			if (root->subsys_mask & (1 << ssid))
				seq_printf(m, "%s%s", count++ ? "," : "", ss->name);
		if (strlen(root->name))
			seq_printf(m, "%sname=%s", count ? "," : "",
				   root->name);
		seq_putc(m, ':');
		cgrp = task_cgroup_from_root(tsk, root);
		retval = cgroup_path(cgrp, buf, PAGE_SIZE);
		if (retval < 0)
			goto out_unlock;
		seq_puts(m, buf);
		seq_putc(m, '\n');
	}

out_unlock:
	mutex_unlock(&cgroup_mutex);
	put_task_struct(tsk);
out_free:
	kfree(buf);
out:
	return retval;
}

/* Display information about each subsystem and each hierarchy */
static int proc_cgroupstats_show(struct seq_file *m, void *v)
{
	struct cgroup_subsys *ss;
	int i;

	seq_puts(m, "#subsys_name\thierarchy\tnum_cgroups\tenabled\n");
	/*
	 * ideally we don't want subsystems moving around while we do this.
	 * cgroup_mutex is also necessary to guarantee an atomic snapshot of
	 * subsys/hierarchy state.
	 */
	mutex_lock(&cgroup_mutex);

	for_each_subsys(ss, i)
		seq_printf(m, "%s\t%d\t%d\t%d\n",
			   ss->name, ss->root->hierarchy_id,
			   ss->root->number_of_cgroups, !ss->disabled);
	mutex_unlock(&cgroup_mutex);
	return 0;
}

static int cgroupstats_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_cgroupstats_show, NULL);
}

static const struct file_operations proc_cgroupstats_operations = {
	.open = cgroupstats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/**
 * cgroup_fork - attach newly forked task to its parent's cgroup.
 * @child: pointer to task_struct of the newly forked child process.
 *
 * Description: A task inherits its parent's cgroup at fork().
 *
 * A pointer to the shared css_set was automatically copied in
 * fork.c by dup_task_struct().  However, we ignore that copy, since
 * it was not made under the protection of RCU or cgroup_mutex, so
 * might no longer be a valid cgroup pointer.  cgroup_attach_task() might
 * have already changed current->cgroups, allowing the previously
 * referenced cgroup group to be removed and freed.
 *
 * At the point that cgroup_fork() is called, 'current' is the parent
 * task, and the passed argument 'child' points to the child task.
 */
void cgroup_fork(struct task_struct *child)
{
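	/* the child starts out sharing current's css_set; pin it */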
	get_css_set(task_css_set(current));
	child->cgroups = current->cgroups;
	INIT_LIST_HEAD(&child->cg_list);
}

/**
 * cgroup_post_fork - called on a new task after adding it to the task list
 * @child: the task in question
 *
 * Adds the task to the list running through its css_set if necessary and
 * calls the subsystem fork() callbacks.  Has to be after the task is
 * visible on the task list in case we race with the first call to
 * cgroup_task_iter_start() - to guarantee that the new task ends up on its
 * css_set's task list.
 */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/*
	 * use_task_css_set_links is set to 1 before we walk the tasklist
	 * under the tasklist_lock and we read it here after we added the child
	 * to the tasklist under the tasklist_lock as well. If the child wasn't
	 * yet in the tasklist when we walked through it from
	 * cgroup_enable_task_cg_lists(), then use_task_css_set_links value
	 * should be visible now due to the paired locking and barriers implied
	 * by LOCK/UNLOCK: it is written before the tasklist_lock unlock
	 * in cgroup_enable_task_cg_lists() and read here after the tasklist_lock
	 * lock on fork.
	 */
	if (use_task_css_set_links) {
		write_lock(&css_set_lock);
		task_lock(child);
		if (list_empty(&child->cg_list))
			list_add(&child->cg_list, &task_css_set(child)->tasks);
		write_unlock(&css_set_lock);
	}

	/*
	 * Call ss->fork().  This must happen after @child is linked on
	 * css_set; otherwise, @child might change state between ->fork()
	 * and addition to css_set.
	 */
	if (need_forkexit_callback) {
		for_each_subsys(ss, i)
			if (ss->fork)
				ss->fork(child);
	}
}

/**
 * cgroup_exit - detach cgroup from exiting task
 * @tsk: pointer to task_struct of exiting process
Li Zefan's avatar
Li Zefan committed
 * @run_callbacks: run exit callbacks?
 *
 * Description: Detach cgroup from @tsk and release it.
 *
 * Note that cgroups marked notify_on_release force every task in
 * them to take the global cgroup_mutex mutex when exiting.
 * This could impact scaling on very large systems.  Be reluctant to
 * use notify_on_release cgroups where very high task exit scaling
 * is required on large systems.
 *
 * the_top_cgroup_hack:
 *
 *    Set the exiting tasks cgroup to the root cgroup (top_cgroup).
 *
 *    We call cgroup_exit() while the task is still competent to
 *    handle notify_on_release(), then leave the task attached to the
 *    root cgroup in each hierarchy for the remainder of its exit.
 *
 *    To do this properly, we would increment the reference count on
 *    top_cgroup, and near the very end of the kernel/exit.c do_exit()
 *    code we would add a second cgroup function call, to drop that
 *    reference.  This would just create an unnecessary hot spot on
 *    the top_cgroup reference count, to no avail.
 *
 *    Normally, holding a reference to a cgroup without bumping its
 *    count is unsafe.   The cgroup could go away, or someone could
 *    attach us to a different cgroup, decrementing the count on
 *    the first cgroup that we never incremented.  But in this case,
 *    top_cgroup isn't going away, and either task has PF_EXITING set,
 *    which wards off any cgroup_attach_task() attempts, or task is a failed
 *    fork, never visible to cgroup_attach_task.
 */
void cgroup_exit(struct task_struct *tsk, int run_callbacks)
{
	struct cgroup_subsys *ss;
	struct css_set *cset;
	int i;

	/*
	 * Unlink from the css_set task list if necessary.
	 * Optimistically check cg_list before taking
	 * css_set_lock
	 */
	if (!list_empty(&tsk->cg_list)) {
		write_lock(&css_set_lock);
		if (!list_empty(&tsk->cg_list))
			list_del_init(&tsk->cg_list);
		write_unlock(&css_set_lock);
	}

	/* Reassign the task to the init_css_set. */
	task_lock(tsk);
	cset = task_css_set(tsk);
	RCU_INIT_POINTER(tsk->cgroups, &init_css_set);

	if (run_callbacks && need_forkexit_callback) {
		/* see cgroup_post_fork() for details */
		for_each_subsys(ss, i) {
			if (ss->exit) {
				struct cgroup_subsys_state *old_css = cset->subsys[i];
				struct cgroup_subsys_state *css = task_css(tsk, i);

				ss->exit(css, old_css, tsk);
			}
		}
	}
	task_unlock(tsk);

	put_css_set_taskexit(cset);
}

static void check_for_release(struct cgroup *cgrp)
{
	if (cgroup_is_releasable(cgrp) &&
	    list_empty(&cgrp->cset_links) && list_empty(&cgrp->children)) {
		/*
		 * Control Group is currently removable.  If it's not
		 * already queued for a userspace notification, queue
		 * a callback that will notify the user.
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.