/* arch/powerpc/platforms/cell/spufs/file.c */
/*
 * SPU file system -- file contents
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/ptrace.h>
#include <linux/marker.h>

#include <asm/io.h>
#include <asm/time.h>
#include <asm/spu.h>
#include <asm/uaccess.h>

#include "spufs.h"

#define SPUFS_MMAP_4K (PAGE_SIZE == 0x1000)

/* Simple attribute files */
struct spufs_attr {
	int (*get)(void *, u64 *);	/* read current value; 0 on success */
	int (*set)(void *, u64);	/* store new value */
	char get_buf[24];       /* enough to store a u64 and "\n\0" */
	char set_buf[24];	/* staging buffer for user-supplied text */
	void *data;		/* opaque cookie (inode->i_private) passed to get/set */
	const char *fmt;        /* format for read operation */
	struct mutex mutex;     /* protects access to these buffers */
};

/*
 * Common open path for the simple attribute files: allocate a
 * struct spufs_attr, bind it to the inode's private data and the
 * given get/set callbacks, and park it in file->private_data for
 * the read/write handlers.
 */
static int spufs_attr_open(struct inode *inode, struct file *file,
		int (*get)(void *, u64 *), int (*set)(void *, u64),
		const char *fmt)
{
	struct spufs_attr *attr = kmalloc(sizeof(*attr), GFP_KERNEL);

	if (!attr)
		return -ENOMEM;

	mutex_init(&attr->mutex);
	attr->get = get;
	attr->set = set;
	attr->fmt = fmt;
	attr->data = inode->i_private;
	file->private_data = attr;

	return nonseekable_open(inode, file);
}

/* Release the spufs_attr allocated by spufs_attr_open(). */
static int spufs_attr_release(struct inode *inode, struct file *file)
{
	kfree(file->private_data);
	return 0;
}

/*
 * Read handler for simple attribute files.  The first read (*ppos == 0)
 * invokes the attribute's get() callback and formats the value into
 * get_buf; subsequent reads serve the remainder of that cached buffer.
 * attr->mutex serializes access to the formatting buffers.
 */
static ssize_t spufs_attr_read(struct file *file, char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->get)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	if (*ppos) {		/* continued read */
		size = strlen(attr->get_buf);
	} else {		/* first read */
		u64 val;
		ret = attr->get(attr->data, &val);
		if (ret)
			goto out;

		size = scnprintf(attr->get_buf, sizeof(attr->get_buf),
				 attr->fmt, (unsigned long long)val);
	}

	ret = simple_read_from_buffer(buf, len, ppos, attr->get_buf, size);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/*
 * Write handler for simple attribute files: copy the user's text into
 * set_buf, parse it as a number and hand it to the set() callback.
 * The whole input is claimed as consumed even if only part fits in
 * set_buf; the set() return value is deliberately ignored.
 */
static ssize_t spufs_attr_write(struct file *file, const char __user *buf,
		size_t len, loff_t *ppos)
{
	struct spufs_attr *attr;
	u64 val;
	size_t size;
	ssize_t ret;

	attr = file->private_data;
	if (!attr->set)
		return -EACCES;

	ret = mutex_lock_interruptible(&attr->mutex);
	if (ret)
		return ret;

	ret = -EFAULT;
	/* leave room for the terminating NUL */
	size = min(sizeof(attr->set_buf) - 1, len);
	if (copy_from_user(attr->set_buf, buf, size))
		goto out;

	ret = len; /* claim we got the whole input */
	attr->set_buf[size] = '\0';
	val = simple_strtol(attr->set_buf, NULL, 0);
	attr->set(attr->data, val);
out:
	mutex_unlock(&attr->mutex);
	return ret;
}

/*
 * Declare a file_operations named __fops whose open wires up the given
 * get/set callbacks through the spufs_attr helpers above.  The
 * __simple_attr_check_format() call type-checks __fmt against u64 at
 * compile time.
 */
#define DEFINE_SPUFS_SIMPLE_ATTRIBUTE(__fops, __get, __set, __fmt)	\
static int __fops ## _open(struct inode *inode, struct file *file)	\
{									\
	__simple_attr_check_format(__fmt, 0ull);			\
	return spufs_attr_open(inode, file, __get, __set, __fmt);	\
}									\
static struct file_operations __fops = {				\
	.owner	 = THIS_MODULE,						\
	.open	 = __fops ## _open,					\
	.release = spufs_attr_release,					\
	.read	 = spufs_attr_read,					\
	.write	 = spufs_attr_write,					\
};

/*
 * Open the "mem" (local store) file; the first opener registers the
 * inode's address space so the context can unmap it on state changes.
 */
static int
spufs_mem_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (i->i_openers++ == 0)
		ctx->local_store = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}

/*
 * Release the "mem" file; the last closer drops the registered mapping.
 * (Restores the return statement and closing brace lost from this
 * function body.)
 */
static int
spufs_mem_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->local_store = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
/* Copy local store contents to user space; caller holds the context. */
static ssize_t
__spufs_mem_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	char *ls = ctx->ops->get_ls(ctx);

	return simple_read_from_buffer(buffer, size, pos, ls, LS_SIZE);
}

/*
 * Read from the SPU local store, acquiring the context around the copy.
 * Fix: the 'ret' variable was used without being declared.
 */
static ssize_t
spufs_mem_read(struct file *file, char __user *buffer,
				size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ret = __spufs_mem_read(ctx, buffer, size, pos);
	spu_release(ctx);

	return ret;
}

/*
 * Write to the SPU local store.  Clamps the write to LS_SIZE, acquires
 * the context for the duration of the copy and advances *ppos on
 * success.  (Restores the truncated parameter list, the 'pos'/'ret'
 * declarations and the closing brace.)
 */
static ssize_t
spufs_mem_write(struct file *file, const char __user *buffer,
					size_t size, loff_t *ppos)
{
	struct spu_context *ctx = file->private_data;
	char *local_store;
	loff_t pos = *ppos;
	int ret;

	if (pos < 0)
		return -EINVAL;
	if (pos > LS_SIZE)
		return -EFBIG;
	if (size > LS_SIZE - pos)
		size = LS_SIZE - pos;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	local_store = ctx->ops->get_ls(ctx);
	ret = copy_from_user(local_store + pos, buffer, size);
	spu_release(ctx);

	if (ret)
		return -EFAULT;
	*ppos = pos + size;
	return size;
}
static unsigned long spufs_mem_mmap_nopfn(struct vm_area_struct *vma,
					  unsigned long address)
	struct spu_context *ctx	= vma->vm_file->private_data;
	unsigned long pfn, offset, addr0 = address;
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_state *csa = &ctx->csa;
	int psize;

	/* Check what page size we are using */
	psize = get_slice_psize(vma->vm_mm, address);

	/* Some sanity checking */
	BUG_ON(csa->use_big_pages != (psize == MMU_PAGE_64K));

	/* Wow, 64K, cool, we need to align the address though */
	if (csa->use_big_pages) {
		BUG_ON(vma->vm_start & 0xffff);
		address &= ~0xfffful;
	}
#endif /* CONFIG_SPU_FS_64K_LS */
	offset = (address - vma->vm_start) + (vma->vm_pgoff << PAGE_SHIFT);
	pr_debug("spufs_mem_mmap_nopfn address=0x%lx -> 0x%lx, offset=0x%lx\n",
		 addr0, address, offset);

	if (spu_acquire(ctx))
		return NOPFN_REFAULT;
	if (ctx->state == SPU_STATE_SAVED) {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
		pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset);
	} else {
		vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
					     | _PAGE_NO_CACHE);
		pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT;
	vm_insert_pfn(vma, address, pfn);
/* VM operations for local store mappings. */
static struct vm_operations_struct spufs_mem_mmap_vmops = {
	.nopfn = spufs_mem_mmap_nopfn,
};
static int spufs_mem_mmap(struct file *file, struct vm_area_struct *vma)
#ifdef CONFIG_SPU_FS_64K_LS
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* Sanity check VMA alignment */
	if (csa->use_big_pages) {
		pr_debug("spufs_mem_mmap 64K, start=0x%lx, end=0x%lx,"
			 " pgoff=0x%lx\n", vma->vm_start, vma->vm_end,
			 vma->vm_pgoff);
		if (vma->vm_start & 0xffff)
			return -EINVAL;
		if (vma->vm_pgoff & 0xf)
			return -EINVAL;
	}
#endif /* CONFIG_SPU_FS_64K_LS */

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE);

	vma->vm_ops = &spufs_mem_mmap_vmops;
#ifdef CONFIG_SPU_FS_64K_LS
/*
 * Choose a mapping address for the local store.  Falls back to the mm's
 * default placement unless the context uses 64K pages, in which case a
 * 64K slice is requested.  (Restores the #ifdef matching the #endif
 * below, which had been lost.)
 */
static unsigned long spufs_get_unmapped_area(struct file *file,
		unsigned long addr, unsigned long len, unsigned long pgoff,
		unsigned long flags)
{
	struct spu_context	*ctx = file->private_data;
	struct spu_state	*csa = &ctx->csa;

	/* If not using big pages, fallback to normal MM g_u_a */
	if (!csa->use_big_pages)
		return current->mm->get_unmapped_area(file, addr, len,
						      pgoff, flags);

	/* Else, try to obtain a 64K pages slice */
	return slice_get_unmapped_area(addr, len, flags,
				       MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */

/* File operations for the SPU local store ("mem") file. */
static const struct file_operations spufs_mem_fops = {
	.open			= spufs_mem_open,
	.release		= spufs_mem_release,
	.read			= spufs_mem_read,
	.write			= spufs_mem_write,
	.llseek			= generic_file_llseek,
	.mmap			= spufs_mem_mmap,
#ifdef CONFIG_SPU_FS_64K_LS
	.get_unmapped_area	= spufs_get_unmapped_area,
#endif
};
static unsigned long spufs_ps_nopfn(struct vm_area_struct *vma,
				    unsigned long address,
{
	struct spu_context *ctx = vma->vm_file->private_data;
	unsigned long area, offset = address - vma->vm_start;
	spu_context_nospu_trace(spufs_ps_nopfn__enter, ctx);

	offset += vma->vm_pgoff << PAGE_SHIFT;
	/*
	 * Because we release the mmap_sem, the context may be destroyed while
	 * we're in spu_wait. Grab an extra reference so it isn't destroyed
	 * in the meantime.
	 */
	get_spu_context(ctx);

	/*
	 * We have to wait for context to be loaded before we have
	 * pages to hand out to the user, but we don't want to wait
	 * with the mmap_sem held.
	 * It is possible to drop the mmap_sem here, but then we need
	 * to return NOPFN_REFAULT because the mappings may have
	 * hanged.
	if (spu_acquire(ctx))
	if (ctx->state == SPU_STATE_SAVED) {
		up_read(&current->mm->mmap_sem);
		spu_context_nospu_trace(spufs_ps_nopfn__sleep, ctx);
		ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE);
		spu_context_trace(spufs_ps_nopfn__wake, ctx, ctx->spu);
		down_read(&current->mm->mmap_sem);
	} else {
		area = ctx->spu->problem_phys + ps_offs;
		vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
		spu_context_trace(spufs_ps_nopfn__insert, ctx, ctx->spu);
	if (!ret)
		spu_release(ctx);
static unsigned long spufs_cntl_mmap_nopfn(struct vm_area_struct *vma,
					   unsigned long address)
	return spufs_ps_nopfn(vma, address, 0x4000, 0x1000);
}

/* VM operations for the control-area mapping. */
static struct vm_operations_struct spufs_cntl_mmap_vmops = {
	.nopfn = spufs_cntl_mmap_nopfn,
};

/*
 * mmap support for problem state control area [0x4000 - 0x4fff].
 * Requires a shared mapping; pages are mapped no-cache and guarded
 * since they cover memory-mapped SPU registers.
 */
static int spufs_cntl_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot)
				     | _PAGE_NO_CACHE | _PAGE_GUARDED);

	vma->vm_ops = &spufs_cntl_mmap_vmops;
	return 0;
}
#else /* SPUFS_MMAP_4K */
#define spufs_cntl_mmap NULL
#endif /* !SPUFS_MMAP_4K */
/*
 * Attribute getter: read the SPU status register under the context lock.
 * (Restores the opening brace, 'ret' declaration, spu_release() and
 * return value lost from this body.)
 */
static int spufs_cntl_get(void *data, u64 *val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	*val = ctx->ops->status_read(ctx);
	spu_release(ctx);

	return 0;
}
/*
 * Attribute setter: write the SPU run-control register under the context
 * lock.  (Restores the opening brace, 'ret' declaration and return.)
 */
static int spufs_cntl_set(void *data, u64 val)
{
	struct spu_context *ctx = data;
	int ret;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ctx->ops->runcntl_write(ctx, val);
	spu_release(ctx);

	return 0;
}
static int spufs_cntl_open(struct inode *inode, struct file *file)
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	mutex_lock(&ctx->mapping_lock);
	file->private_data = ctx;
	if (!i->i_openers++)
		ctx->cntl = inode->i_mapping;
	mutex_unlock(&ctx->mapping_lock);
	return simple_attr_open(inode, file, spufs_cntl_get,
					spufs_cntl_set, "0x%08lx");
/*
 * Release the "cntl" file: tear down the attribute state and drop the
 * mapping on last close.  Uses spufs_attr_release() to match
 * spufs_cntl_open().  (Restores the return statement and brace.)
 */
static int
spufs_cntl_release(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	struct spu_context *ctx = i->i_ctx;

	spufs_attr_release(inode, file);

	mutex_lock(&ctx->mapping_lock);
	if (!--i->i_openers)
		ctx->cntl = NULL;
	mutex_unlock(&ctx->mapping_lock);
	return 0;
}
static const struct file_operations spufs_cntl_fops = {
	.open = spufs_cntl_open,
	.release = spufs_cntl_release,
	.read = simple_attr_read,
	.write = simple_attr_write,
static int
spufs_regs_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;
	return 0;
}

/* Copy the saved GPR array to user space; caller holds the saved context. */
static ssize_t
__spufs_regs_read(struct spu_context *ctx, char __user *buffer,
			size_t size, loff_t *pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	return simple_read_from_buffer(buffer, size, pos, lscsa->gprs,
				       sizeof lscsa->gprs);
}

/*
 * Read the saved SPU general-purpose registers.
 * Fix: the saved context was never released after the copy, leaking the
 * context lock on every read.
 */
static ssize_t
spufs_regs_read(struct file *file, char __user *buffer,
		size_t size, loff_t *pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_regs_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

/*
 * Overwrite part of the saved GPR array from user space.  The write is
 * clamped to the array size and *pos is advanced before the copy (the
 * source offset is computed back from it).  (Restores the
 * spu_release_saved(), the return and the closing brace.)
 */
static ssize_t
spufs_regs_write(struct file *file, const char __user *buffer,
		 size_t size, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size);
	if (size <= 0)
		return -EFBIG;
	*pos += size;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	ret = copy_from_user(lscsa->gprs + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

/* File operations for the saved general-purpose registers ("regs"). */
static const struct file_operations spufs_regs_fops = {
	.open	 = spufs_regs_open,
	.read    = spufs_regs_read,
	.write   = spufs_regs_write,
	.llseek  = generic_file_llseek,
};

/* Copy the saved FPCR register to user space; caller holds the saved context. */
static ssize_t
__spufs_fpcr_read(struct spu_context *ctx, char __user * buffer,
			size_t size, loff_t * pos)
{
	struct spu_lscsa *lscsa = ctx->csa.lscsa;

	return simple_read_from_buffer(buffer, size, pos, &lscsa->fpcr,
				       sizeof(lscsa->fpcr));
}

/*
 * Read the saved floating-point control register.
 * Fix: the saved context was never released after the copy, leaking the
 * context lock on every read.
 */
static ssize_t
spufs_fpcr_read(struct file *file, char __user * buffer,
		size_t size, loff_t * pos)
{
	int ret;
	struct spu_context *ctx = file->private_data;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;
	ret = __spufs_fpcr_read(ctx, buffer, size, pos);
	spu_release_saved(ctx);
	return ret;
}

/*
 * Overwrite part of the saved FPCR from user space, mirroring
 * spufs_regs_write().  (Restores the '*pos += size' advance, the
 * spu_release_saved(), the return and the closing brace.)
 */
static ssize_t
spufs_fpcr_write(struct file *file, const char __user * buffer,
		 size_t size, loff_t * pos)
{
	struct spu_context *ctx = file->private_data;
	struct spu_lscsa *lscsa = ctx->csa.lscsa;
	int ret;

	size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size);
	if (size <= 0)
		return -EFBIG;

	ret = spu_acquire_saved(ctx);
	if (ret)
		return ret;

	*pos += size;
	ret = copy_from_user((char *)&lscsa->fpcr + *pos - size,
			     buffer, size) ? -EFAULT : size;

	spu_release_saved(ctx);
	return ret;
}

/* File operations for the saved floating-point control register ("fpcr"). */
static const struct file_operations spufs_fpcr_fops = {
	.open = spufs_regs_open,
	.read = spufs_fpcr_read,
	.write = spufs_fpcr_write,
	.llseek = generic_file_llseek,
};

/* generic open function for all pipe-like files */
static int spufs_pipe_open(struct inode *inode, struct file *file)
{
	struct spufs_inode_info *i = SPUFS_I(inode);
	file->private_data = i->i_ctx;

	return nonseekable_open(inode, file);
}

/*
 * Read as many bytes from the mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 */
/*
 * Read 32-bit words from the SPU->PPU mailbox until it drains, the user
 * buffer fills, or a fault occurs after some data was already copied.
 * (Restores the 'return count;' and closing brace lost from this body.)
 */
static ssize_t spufs_mbox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 mbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		return count;

	for (count = 0; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->mbox_read(ctx, &mbox_data);
		if (ret == 0)
			break;

		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(mbox_data, udata);
		if (ret) {
			if (!count)
				count = -EFAULT;
			break;
		}
	}
	spu_release(ctx);

	return count;
}
/* File operations for the read-only SPU mailbox ("mbox") file. */
static const struct file_operations spufs_mbox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_read,
};

/*
 * Report the mailbox entry count (low byte of the status register).
 * Fix: the 'ret' variable was used without being declared.
 */
static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 mbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;

	mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff;

	spu_release(ctx);

	if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat))
		return -EFAULT;

	return 4;
}

/* File operations for the mailbox status ("mbox_stat") file. */
static const struct file_operations spufs_mbox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_mbox_stat_read,
};

/* low-level ibox access function; returns 0 when the ibox is empty */
size_t spu_ibox_read(struct spu_context *ctx, u32 *data)
{
	return ctx->ops->ibox_read(ctx, data);
}
/* Register/unregister SIGIO delivery for the ibox file. */
static int spufs_ibox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;

	return fasync_helper(fd, file, on, &ctx->ibox_fasync);
}
/* interrupt-level ibox callback function. */
void spufs_ibox_callback(struct spu *spu)
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->ibox_wq);
	kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN);
/*
 * Read as many bytes from the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - no more data available in the mailbox
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * any data is available, but return when we have been able to
 * read something.
 */
/*
 * Read from the interrupt mailbox: block (unless O_NONBLOCK) until the
 * first word arrives, then drain as many further words as fit in the
 * user buffer without blocking.  (Restores the branch braces, the
 * -EAGAIN path, the out/out_unlock labels and the return, all lost
 * from this body.)
 */
static ssize_t spufs_ibox_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 ibox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	if (!access_ok(VERIFY_WRITE, buf, len))
		return -EFAULT;

	udata = (void __user *)buf;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/* wait only for the first element */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_ibox_read(ctx, &ibox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data));
		if (count)
			goto out;
	}

	/* if we can't write at all, return -EFAULT */
	count = __put_user(ibox_data, udata);
	if (count)
		goto out_unlock;

	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = ctx->ops->ibox_read(ctx, &ibox_data);
		if (ret == 0)
			break;
		/*
		 * at the end of the mapped area, we can fault
		 * but still need to return the data we have
		 * read successfully so far.
		 */
		ret = __put_user(ibox_data, udata);
		if (ret)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

/*
 * poll handler for the ibox file.
 * (Restores the 'return mask;' and closing brace lost from this body.)
 */
static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->ibox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM);
	spu_release(ctx);

	return mask;
}
/* File operations for the interrupt mailbox ("ibox") file. */
static const struct file_operations spufs_ibox_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_read,
	.poll	= spufs_ibox_poll,
	.fasync	= spufs_ibox_fasync,
};

/*
 * Report the ibox entry count (bits 16-23 of the status register).
 * Fix: the 'ret' variable was used without being declared.
 */
static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	ssize_t ret;
	u32 ibox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat))
		return -EFAULT;

	return 4;
}

/* File operations for the ibox status ("ibox_stat") file. */
static const struct file_operations spufs_ibox_stat_fops = {
	.open	= spufs_pipe_open,
	.read	= spufs_ibox_stat_read,
};

/* low-level mailbox write; returns 0 when the wbox is full */
size_t spu_wbox_write(struct spu_context *ctx, u32 data)
{
	return ctx->ops->wbox_write(ctx, data);
}
/*
 * Register/unregister SIGIO delivery for the wbox file.
 * (Restores the return statement and closing brace.)
 */
static int spufs_wbox_fasync(int fd, struct file *file, int on)
{
	struct spu_context *ctx = file->private_data;
	int ret;

	ret = fasync_helper(fd, file, on, &ctx->wbox_fasync);

	return ret;
}
/* interrupt-level wbox callback function. */
void spufs_wbox_callback(struct spu *spu)
	struct spu_context *ctx = spu->ctx;

	wake_up_all(&ctx->wbox_wq);
	kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT);
/*
 * Write as many bytes to the interrupt mailbox as possible, until
 * one of the conditions becomes true:
 *
 * - the mailbox is full
 * - end of the user provided buffer
 * - end of the mapped area
 *
 * If the file is opened without O_NONBLOCK, we wait here until
 * space is available, but return when we have been able to
 * write something.
 */
/*
 * Write to the PPU->SPU interrupt mailbox: block (unless O_NONBLOCK)
 * until the first word can be queued, then keep writing without
 * blocking while both mailbox space and user data remain.  (Restores
 * the -EFAULT early return, the branch braces, the -EAGAIN path, the
 * out/out_unlock labels and the return, all lost from this body.)
 */
static ssize_t spufs_wbox_write(struct file *file, const char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_data, __user *udata;
	ssize_t count;

	if (len < 4)
		return -EINVAL;

	udata = (void __user *)buf;
	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;

	if (__get_user(wbox_data, udata))
		return -EFAULT;

	count = spu_acquire(ctx);
	if (count)
		goto out;

	/*
	 * make sure we can at least write one element, by waiting
	 * in case of !O_NONBLOCK
	 */
	count = 0;
	if (file->f_flags & O_NONBLOCK) {
		if (!spu_wbox_write(ctx, wbox_data)) {
			count = -EAGAIN;
			goto out_unlock;
		}
	} else {
		count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data));
		if (count)
			goto out;
	}

	/* write as much as possible */
	for (count = 4, udata++; (count + 4) <= len; count += 4, udata++) {
		int ret;
		ret = __get_user(wbox_data, udata);
		if (ret)
			break;

		ret = spu_wbox_write(ctx, wbox_data);
		if (ret == 0)
			break;
	}

out_unlock:
	spu_release(ctx);
out:
	return count;
}

/*
 * poll handler for the wbox file.
 * (Restores the 'return mask;' and closing brace lost from this body.)
 */
static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait)
{
	struct spu_context *ctx = file->private_data;
	unsigned int mask;

	poll_wait(file, &ctx->wbox_wq, wait);

	/*
	 * For now keep this uninterruptible and also ignore the rule
	 * that poll should not sleep.  Will be fixed later.
	 */
	mutex_lock(&ctx->state_mutex);
	mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM);
	spu_release(ctx);

	return mask;
}
/* File operations for the write-only mailbox ("wbox") file. */
static const struct file_operations spufs_wbox_fops = {
	.open	= spufs_pipe_open,
	.write	= spufs_wbox_write,
	.poll	= spufs_wbox_poll,
	.fasync	= spufs_wbox_fasync,
};

static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf,
			size_t len, loff_t *pos)
{
	struct spu_context *ctx = file->private_data;
	u32 wbox_stat;

	if (len < 4)
		return -EINVAL;

	ret = spu_acquire(ctx);
	if (ret)
		return ret;
	wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff;
	spu_release(ctx);

	if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat))