		 * already queued for a userspace notification, queue
		 * it now
		 */
		int need_schedule_work = 0;

		raw_spin_lock(&release_list_lock);
		if (!cgroup_is_dead(cgrp) &&
		    list_empty(&cgrp->release_list)) {
			list_add(&cgrp->release_list, &release_list);
			need_schedule_work = 1;
		}
		raw_spin_unlock(&release_list_lock);
		if (need_schedule_work)
			schedule_work(&release_agent_work);
	}
}
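
/*
 * Example usage (illustrative, not part of this file): userspace arms
 * this notification per cgroup and configures the agent binary at the
 * hierarchy root, e.g.:
 *
 *	# echo /sbin/my_release_agent > /sys/fs/cgroup/memory/release_agent
 *	# echo 1 > /sys/fs/cgroup/memory/foo/notify_on_release
 *
 * The paths assume a v1 "memory" hierarchy mounted at
 * /sys/fs/cgroup/memory; /sbin/my_release_agent is a hypothetical agent.
 */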

/*
 * Notify userspace when a cgroup is released, by running the
 * configured release agent with the name of the cgroup (path
 * relative to the root of cgroup file system) as the argument.
 *
 * Most likely, this user command will try to rmdir this cgroup.
 *
 * This races with the possibility that some other task will be
 * attached to this cgroup before it is removed, or that some other
 * user task will 'mkdir' a child cgroup of this cgroup.  That's ok.
 * The presumed 'rmdir' will fail quietly if this cgroup is no longer
 * unused, and this cgroup will be reprieved from its death sentence,
 * to continue to serve a useful existence.  Next time it's released,
 * we will get notified again, if it still has 'notify_on_release' set.
 *
 * The final arg to call_usermodehelper() is UMH_WAIT_EXEC, which
 * means only wait until the task is successfully execve()'d.  The
 * separate release agent task is forked by call_usermodehelper(),
 * then control in this thread returns here, without waiting for the
 * release agent task.  We don't bother to wait because the caller of
 * this routine has no use for the exit status of the release agent
 * task, so no sense holding our caller up for that.
 */
static void cgroup_release_agent(struct work_struct *work)
{
	BUG_ON(work != &release_agent_work);
	mutex_lock(&cgroup_mutex);
	raw_spin_lock(&release_list_lock);
	while (!list_empty(&release_list)) {
		char *argv[3], *envp[3];
		int i;
		char *pathbuf = NULL, *agentbuf = NULL, *path;
		struct cgroup *cgrp = list_entry(release_list.next,
						 struct cgroup,
						 release_list);
		list_del_init(&cgrp->release_list);
		raw_spin_unlock(&release_list_lock);
		pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
		if (!pathbuf)
			goto continue_free;
		path = cgroup_path(cgrp, pathbuf, PATH_MAX);
		if (!path)
			goto continue_free;
		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
		if (!agentbuf)
			goto continue_free;
		i = 0;
		argv[i++] = agentbuf;
		argv[i++] = path;
		argv[i] = NULL;

		i = 0;
		/* minimal command environment */
		envp[i++] = "HOME=/";
		envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
		envp[i] = NULL;

		/* Drop the lock while we invoke the usermode helper,
		 * since the exec could involve hitting disk and hence
		 * be a slow process */
		mutex_unlock(&cgroup_mutex);
		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
		mutex_lock(&cgroup_mutex);
 continue_free:
		kfree(pathbuf);
		kfree(agentbuf);
		raw_spin_lock(&release_list_lock);
	}
	raw_spin_unlock(&release_list_lock);
	mutex_unlock(&cgroup_mutex);
}
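
/*
 * Example (illustrative sketch, not part of this file): a minimal
 * userspace release agent.  The kernel execs the configured binary with
 * the released cgroup's path, relative to the hierarchy root, as
 * argv[1]; a typical agent simply removes the now-empty directory.  The
 * /sys/fs/cgroup/memory mount point below is an assumption about where
 * the hierarchy is mounted.
 *
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		char path[4096];
 *
 *		if (argc < 2)
 *			return 1;
 *		snprintf(path, sizeof(path), "/sys/fs/cgroup/memory%s",
 *			 argv[1]);
 *		return rmdir(path) ? 1 : 0;
 *	}
 *
 * rmdir() fails quietly if the cgroup has picked up tasks or children
 * again, matching the race tolerance described in the comment above.
 */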

static int __init cgroup_disable(char *str)
{
	struct cgroup_subsys *ss;
	char *token;
	int i;

	while ((token = strsep(&str, ",")) != NULL) {
		if (!*token)
			continue;

		for_each_subsys(ss, i) {
			if (!strcmp(token, ss->name)) {
				ss->disabled = 1;
				printk(KERN_INFO "Disabling %s control group"
					" subsystem\n", ss->name);
				break;
			}
		}
	}
	return 1;
}
__setup("cgroup_disable=", cgroup_disable);
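
/*
 * Example usage (kernel command line):
 *
 *	cgroup_disable=memory,debug
 *
 * Each comma-separated token is matched against a subsystem name, and
 * any match is marked disabled before the subsystem initializes.
 */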

/**
 * css_tryget_from_dir - get corresponding css from the dentry of a cgroup dir
 * @dentry: directory dentry of interest
 * @ss: subsystem of interest
 *
 * If @dentry is a directory for a cgroup which has @ss enabled on it, try
 * to get the corresponding css and return it.  If such css doesn't exist
 * or can't be pinned, an ERR_PTR value is returned.
 */
struct cgroup_subsys_state *css_tryget_from_dir(struct dentry *dentry,
						struct cgroup_subsys *ss)
{
	struct kernfs_node *kn = kernfs_node_from_dentry(dentry);
	struct cgroup_subsys_state *css = NULL;
	struct cgroup *cgrp;

	if (dentry->d_sb->s_type != &cgroup_fs_type || !kn ||
	    kernfs_type(kn) != KERNFS_DIR)
		return ERR_PTR(-EBADF);

	rcu_read_lock();

	/*
	 * This path doesn't originate from kernfs and @kn could already
	 * have been or be removed at any point.  @kn->priv is RCU
	 * protected for this access.  See destroy_locked() for details.
	 */
	cgrp = rcu_dereference(kn->priv);
	if (cgrp)
		css = cgroup_css(cgrp, ss);

	if (!css || !css_tryget(css))
		css = ERR_PTR(-ENOENT);

	rcu_read_unlock();
	return css;
}
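
/*
 * Example (illustrative sketch, not part of this file): resolving a css
 * from an opened cgroup directory, roughly as the perf_event controller
 * does.  The file acquisition and error paths are assumed context, and
 * css_put() drops the pinned reference when the caller is done.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	css = css_tryget_from_dir(file->f_dentry, &perf_event_cgrp_subsys);
 *	if (IS_ERR(css))
 *		return PTR_ERR(css);
 *	...
 *	css_put(css);
 */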
/**
 * css_from_id - lookup css by id
 * @id: the cgroup id
 * @ss: cgroup subsys to be looked into
 *
 * Returns the css if there's valid one with @id, otherwise returns NULL.
 * Should be called under rcu_read_lock().
 */
struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
{
	struct cgroup *cgrp;

	cgroup_assert_mutexes_or_rcu_locked();

	cgrp = idr_find(&ss->root->cgroup_idr, id);
	if (cgrp)
		return cgroup_css(cgrp, ss);
	return NULL;
}
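
/*
 * Example (illustrative sketch): looking up a css by id.  The caller
 * must be inside an RCU read-side section and must pin the css before
 * using it outside of it; &memory_cgrp_subsys is just an assumed
 * example controller.
 *
 *	struct cgroup_subsys_state *css;
 *
 *	rcu_read_lock();
 *	css = css_from_id(id, &memory_cgrp_subsys);
 *	if (css && !css_tryget(css))
 *		css = NULL;
 *	rcu_read_unlock();
 */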

#ifdef CONFIG_CGROUP_DEBUG
static struct cgroup_subsys_state *
debug_css_alloc(struct cgroup_subsys_state *parent_css)
{
	struct cgroup_subsys_state *css = kzalloc(sizeof(*css), GFP_KERNEL);

	if (!css)
		return ERR_PTR(-ENOMEM);

	return css;
}

static void debug_css_free(struct cgroup_subsys_state *css)
{
	kfree(css);
}

static u64 debug_taskcount_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return cgroup_task_count(css->cgroup);
}

static u64 current_css_set_read(struct cgroup_subsys_state *css,
				struct cftype *cft)
{
	return (u64)(unsigned long)current->cgroups;
}

static u64 current_css_set_refcount_read(struct cgroup_subsys_state *css,
					 struct cftype *cft)
{
	u64 count;

	rcu_read_lock();
	count = atomic_read(&task_css_set(current)->refcount);
	rcu_read_unlock();
	return count;
}

static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
{
	struct cgrp_cset_link *link;
	struct css_set *cset;
	char *name_buf;

	name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
	if (!name_buf)
		return -ENOMEM;

	rcu_read_lock();
	cset = rcu_dereference(current->cgroups);
	list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
		struct cgroup *c = link->cgrp;

		cgroup_name(c, name_buf, NAME_MAX + 1);
		seq_printf(seq, "Root %d group %s\n",
			   c->root->hierarchy_id, name_buf);
	}
	rcu_read_unlock();
	kfree(name_buf);
	return 0;
}

#define MAX_TASKS_SHOWN_PER_CSS 25
static int cgroup_css_links_read(struct seq_file *seq, void *v)
{
	struct cgroup_subsys_state *css = seq_css(seq);
	struct cgrp_cset_link *link;

	down_read(&css_set_rwsem);
	list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
		struct css_set *cset = link->cset;
		struct task_struct *task;
		int count = 0;

		seq_printf(seq, "css_set %p\n", cset);

		list_for_each_entry(task, &cset->tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}

		list_for_each_entry(task, &cset->mg_tasks, cg_list) {
			if (count++ > MAX_TASKS_SHOWN_PER_CSS)
				goto overflow;
			seq_printf(seq, "  task %d\n", task_pid_vnr(task));
		}
		continue;
	overflow:
		seq_puts(seq, "  ...\n");
	}
	up_read(&css_set_rwsem);
	return 0;
}

static u64 releasable_read(struct cgroup_subsys_state *css, struct cftype *cft)
{
	return test_bit(CGRP_RELEASABLE, &css->cgroup->flags);
}

static struct cftype debug_files[] =  {
	{
		.name = "taskcount",
		.read_u64 = debug_taskcount_read,
	},

	{
		.name = "current_css_set",
		.read_u64 = current_css_set_read,
	},

	{
		.name = "current_css_set_refcount",
		.read_u64 = current_css_set_refcount_read,
	},

	{
		.name = "current_css_set_cg_links",
		.seq_show = current_css_set_cg_links_read,
	},

	{
		.name = "cgroup_css_links",
		.seq_show = cgroup_css_links_read,
	},

	{
		.name = "releasable",
		.read_u64 = releasable_read,
	},

	{ }	/* terminate */
};

struct cgroup_subsys debug_cgrp_subsys = {
	.css_alloc = debug_css_alloc,
	.css_free = debug_css_free,
	.base_cftypes = debug_files,
};
#endif /* CONFIG_CGROUP_DEBUG */
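
/*
 * Example usage (illustrative): once the debug controller is mounted,
 * its files appear with the "debug." prefix, e.g.:
 *
 *	# mount -t cgroup -o debug none /mnt
 *	# cat /mnt/debug.taskcount
 *
 * The mount point /mnt is an arbitrary choice.
 */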