// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 Avionic Design GmbH
 * Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/platform_device.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>
#include <drm/drm_vblank.h>

#include "drm.h"
#include "gem.h"

#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
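
/*
 * 16383 (2^14 - 1) is the largest value that fits in the CDMA gather
 * opcode's 14-bit word-count field, hence the upper bound enforced on
 * each command buffer at submit time.
 */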
#define CDMA_GATHER_FETCHES_MAX_NB 16383

struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};

static int tegra_atomic_check(struct drm_device *drm,
			      struct drm_atomic_state *state)
{
	int err;

	err = drm_atomic_helper_check(drm, state);
	if (err < 0)
		return err;

	return tegra_display_hub_atomic_check(drm, state);
}

static const struct drm_mode_config_funcs tegra_drm_mode_config_funcs = {
	.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
	.output_poll_changed = drm_fb_helper_output_poll_changed,
#endif
	.atomic_check = tegra_atomic_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static void tegra_atomic_commit_tail(struct drm_atomic_state *old_state)
{
	struct drm_device *drm = old_state->dev;
	struct tegra_drm *tegra = drm->dev_private;

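	/*
	 * When the display hub is present, its state must be committed after
	 * the outgoing configuration has been disabled but before planes and
	 * the incoming configuration are enabled, so the generic commit-tail
	 * helper can only be used in the non-hub case.
	 */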
	if (tegra->hub) {
		drm_atomic_helper_commit_modeset_disables(drm, old_state);
		tegra_display_hub_atomic_commit(drm, old_state);
		drm_atomic_helper_commit_planes(drm, old_state, 0);
		drm_atomic_helper_commit_modeset_enables(drm, old_state);
		drm_atomic_helper_commit_hw_done(old_state);
		drm_atomic_helper_wait_for_vblanks(drm, old_state);
		drm_atomic_helper_cleanup_planes(drm, old_state);
	} else {
		drm_atomic_helper_commit_tail_rpm(old_state);
	}
}

static const struct drm_mode_config_helper_funcs
tegra_drm_mode_config_helpers = {
	.atomic_commit_tail = tegra_atomic_commit_tail,
};

static int tegra_drm_open(struct drm_device *drm, struct drm_file *filp)
{
	struct tegra_drm_file *fpriv;

	fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
	if (!fpriv)
		return -ENOMEM;

	idr_init_base(&fpriv->contexts, 1);
	mutex_init(&fpriv->lock);
	filp->driver_priv = fpriv;

	return 0;
}

static void tegra_drm_context_free(struct tegra_drm_context *context)
{
	context->client->ops->close_channel(context);
	kfree(context);
}

static struct host1x_bo *
host1x_bo_lookup(struct drm_file *file, u32 handle)
{
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, handle);
	if (!gem)
		return NULL;

	bo = to_tegra_bo(gem);
	return &bo->base;
}

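/*
 * Note that on success the references taken on the cmdbuf and target BOs
 * are dropped by the caller, which tracks them in the refs[] array of
 * tegra_drm_submit().
 */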
static int host1x_reloc_copy_from_user(struct host1x_reloc *dest,
				       struct drm_tegra_reloc __user *src,
				       struct drm_device *drm,
				       struct drm_file *file)
{
	u32 cmdbuf, target;
	int err;

	err = get_user(cmdbuf, &src->cmdbuf.handle);
	if (err < 0)
		return err;

	err = get_user(dest->cmdbuf.offset, &src->cmdbuf.offset);
	if (err < 0)
		return err;

	err = get_user(target, &src->target.handle);
	if (err < 0)
		return err;

	err = get_user(dest->target.offset, &src->target.offset);
	if (err < 0)
		return err;

	err = get_user(dest->shift, &src->shift);
	if (err < 0)
		return err;

	dest->flags = HOST1X_RELOC_READ | HOST1X_RELOC_WRITE;

	dest->cmdbuf.bo = host1x_bo_lookup(file, cmdbuf);
	if (!dest->cmdbuf.bo)
		return -ENOENT;

	dest->target.bo = host1x_bo_lookup(file, target);
	if (!dest->target.bo)
		return -ENOENT;

	return 0;
}

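/*
 * tegra_drm_submit() validates a userspace job description, resolves all
 * command buffer and relocation handles to GEM objects, and queues the
 * resulting job on the context's host1x channel. On success, args->fence
 * contains the syncpoint threshold that userspace can wait on for
 * completion.
 */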
int tegra_drm_submit(struct tegra_drm_context *context,
		     struct drm_tegra_submit *args, struct drm_device *drm,
		     struct drm_file *file)
{
	struct host1x_client *client = &context->client->base;
	unsigned int num_cmdbufs = args->num_cmdbufs;
	unsigned int num_relocs = args->num_relocs;
	struct drm_tegra_cmdbuf __user *user_cmdbufs;
	struct drm_tegra_reloc __user *user_relocs;
	struct drm_tegra_syncpt __user *user_syncpt;
	struct drm_tegra_syncpt syncpt;
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_gem_object **refs;
	struct host1x_syncpt *sp;
	struct host1x_job *job;
	unsigned int num_refs;
	int err;

	user_cmdbufs = u64_to_user_ptr(args->cmdbufs);
	user_relocs = u64_to_user_ptr(args->relocs);
	user_syncpt = u64_to_user_ptr(args->syncpts);

	/* We don't yet support more than one syncpt_incr struct per submit */
	if (args->num_syncpts != 1)
		return -EINVAL;

	/* We don't yet support waitchks */
	if (args->num_waitchks != 0)
		return -EINVAL;

	job = host1x_job_alloc(context->channel, args->num_cmdbufs,
			       args->num_relocs);
	if (!job)
		return -ENOMEM;

	job->num_relocs = args->num_relocs;
	job->client = client;
	job->class = client->class;

	/*
	 * Track referenced BOs so that they can be unreferenced after the
	 * submission is complete.
	 */
	num_refs = num_cmdbufs + num_relocs * 2;

	refs = kmalloc_array(num_refs, sizeof(*refs), GFP_KERNEL);
	if (!refs) {
		err = -ENOMEM;
		goto put;
	}

	/* reuse as an iterator later */
	num_refs = 0;

	while (num_cmdbufs) {
		struct drm_tegra_cmdbuf cmdbuf;
		struct host1x_bo *bo;
		struct tegra_bo *obj;
		u64 offset;

		if (copy_from_user(&cmdbuf, user_cmdbufs, sizeof(cmdbuf))) {
			err = -EFAULT;
			goto fail;
		}

		/*
		 * The maximum number of CDMA gather fetches is 16383; a
		 * higher value means the word count is malformed.
		 */
		if (cmdbuf.words > CDMA_GATHER_FETCHES_MAX_NB) {
			err = -EINVAL;
			goto fail;
		}

		bo = host1x_bo_lookup(file, cmdbuf.handle);
		if (!bo) {
			err = -ENOENT;
			goto fail;
		}

		offset = (u64)cmdbuf.offset + (u64)cmdbuf.words * sizeof(u32);
		obj = host1x_to_tegra_bo(bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * The gather buffer base address must be 4-byte aligned; an
		 * unaligned offset is malformed and causes command stream
		 * corruption when the buffer address is relocated.
		 */
		if (offset & 3 || offset > obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		host1x_job_add_gather(job, bo, cmdbuf.words, cmdbuf.offset);
		num_cmdbufs--;
		user_cmdbufs++;
	}

	/* copy and resolve relocations from submit */
	while (num_relocs--) {
		struct host1x_reloc *reloc;
		struct tegra_bo *obj;

		err = host1x_reloc_copy_from_user(&job->relocs[num_relocs],
						  &user_relocs[num_relocs], drm,
						  file);
		if (err < 0)
			goto fail;

		reloc = &job->relocs[num_relocs];
		obj = host1x_to_tegra_bo(reloc->cmdbuf.bo);
		refs[num_refs++] = &obj->gem;

		/*
		 * An unaligned cmdbuf offset causes an unaligned write during
		 * relocation patching, corrupting the command stream.
		 */
		if (reloc->cmdbuf.offset & 3 ||
		    reloc->cmdbuf.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}

		obj = host1x_to_tegra_bo(reloc->target.bo);
		refs[num_refs++] = &obj->gem;

		if (reloc->target.offset >= obj->gem.size) {
			err = -EINVAL;
			goto fail;
		}
	}

	if (copy_from_user(&syncpt, user_syncpt, sizeof(syncpt))) {
		err = -EFAULT;
		goto fail;
	}

	/* check whether syncpoint ID is valid */
	sp = host1x_syncpt_get(host1x, syncpt.id);
	if (!sp) {
		err = -ENOENT;
		goto fail;
	}

	job->is_addr_reg = context->client->ops->is_addr_reg;
	job->is_valid_class = context->client->ops->is_valid_class;
	job->syncpt_incrs = syncpt.incrs;
	job->syncpt_id = syncpt.id;
	job->timeout = 10000;

	if (args->timeout && args->timeout < 10000)
		job->timeout = args->timeout;

	err = host1x_job_pin(job, context->client->base.dev);
	if (err)
		goto fail;

	err = host1x_job_submit(job);
	if (err) {
		host1x_job_unpin(job);
		goto fail;
	}

	args->fence = job->syncpt_end;

fail:
	while (num_refs--)
		drm_gem_object_put(refs[num_refs]);

	kfree(refs);

put:
	host1x_job_put(job);
	return err;
}

#ifdef CONFIG_DRM_TEGRA_STAGING
static int tegra_gem_create(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct drm_tegra_gem_create *args = data;
	struct tegra_bo *bo;

	bo = tegra_bo_create_with_handle(file, drm, args->size, args->flags,
					 &args->handle);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	return 0;
}

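/*
 * Note that this ioctl only reports the fake mmap offset for the buffer;
 * the actual mapping is established by a subsequent mmap() of the DRM file
 * descriptor using that offset.
 */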
static int tegra_gem_mmap(struct drm_device *drm, void *data,
			  struct drm_file *file)
{
	struct drm_tegra_gem_mmap *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -EINVAL;

	bo = to_tegra_bo(gem);

	args->offset = drm_vma_node_offset_addr(&bo->gem.vma_node);

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_syncpt_read(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_read *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host, args->id);
	if (!sp)
		return -EINVAL;

	args->value = host1x_syncpt_read_min(sp);
	return 0;
}

static int tegra_syncpt_incr(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_incr *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_incr(sp);
}

static int tegra_syncpt_wait(struct drm_device *drm, void *data,
			     struct drm_file *file)
{
	struct host1x *host1x = dev_get_drvdata(drm->dev->parent);
	struct drm_tegra_syncpt_wait *args = data;
	struct host1x_syncpt *sp;

	sp = host1x_syncpt_get(host1x, args->id);
	if (!sp)
		return -EINVAL;

	return host1x_syncpt_wait(sp, args->thresh,
				  msecs_to_jiffies(args->timeout),
				  &args->value);
}

static int tegra_client_open(struct tegra_drm_file *fpriv,
			     struct tegra_drm_client *client,
			     struct tegra_drm_context *context)
{
	int err;

	err = client->ops->open_channel(client, context);
	if (err < 0)
		return err;

	err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
	if (err < 0) {
		client->ops->close_channel(context);
		return err;
	}

	context->client = client;
	context->id = err;

	return 0;
}

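/*
 * The IDR slot allocated in tegra_client_open() doubles as the context
 * handle that is returned to userspace in args->context and later used by
 * the close-channel, get-syncpt and submit ioctls.
 */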
static int tegra_open_channel(struct drm_device *drm, void *data,
			      struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_tegra_open_channel *args = data;
	struct tegra_drm_context *context;
	struct tegra_drm_client *client;
	int err = -ENODEV;

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;

	mutex_lock(&fpriv->lock);

	list_for_each_entry(client, &tegra->clients, list)
		if (client->base.class == args->client) {
			err = tegra_client_open(fpriv, client, context);
			if (err < 0)
				break;

			args->context = context->id;
			break;
		}

	if (err < 0)
		kfree(context);

	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_close_channel(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_close_channel *args = data;
	struct tegra_drm_context *context;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -EINVAL;
		goto unlock;
	}

	idr_remove(&fpriv->contexts, context->id);
	tegra_drm_context_free(context);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt(struct drm_device *drm, void *data,
			    struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->index >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->index];
	args->id = host1x_syncpt_id(syncpt);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_submit(struct drm_device *drm, void *data,
			struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_submit *args = data;
	struct tegra_drm_context *context;
	int err;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	err = context->client->ops->submit(context, args, drm, file);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
				 struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;
	struct drm_tegra_get_syncpt_base *args = data;
	struct tegra_drm_context *context;
	struct host1x_syncpt_base *base;
	struct host1x_syncpt *syncpt;
	int err = 0;

	mutex_lock(&fpriv->lock);

	context = idr_find(&fpriv->contexts, args->context);
	if (!context) {
		err = -ENODEV;
		goto unlock;
	}

	if (args->syncpt >= context->client->base.num_syncpts) {
		err = -EINVAL;
		goto unlock;
	}

	syncpt = context->client->base.syncpts[args->syncpt];

	base = host1x_syncpt_get_base(syncpt);
	if (!base) {
		err = -ENXIO;
		goto unlock;
	}

	args->id = host1x_syncpt_base_id(base);

unlock:
	mutex_unlock(&fpriv->lock);
	return err;
}

static int tegra_gem_set_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_set_tiling *args = data;
	enum tegra_bo_tiling_mode mode;
	struct drm_gem_object *gem;
	unsigned long value = 0;
	struct tegra_bo *bo;

	switch (args->mode) {
	case DRM_TEGRA_GEM_TILING_MODE_PITCH:
		mode = TEGRA_BO_TILING_MODE_PITCH;

		if (args->value != 0)
			return -EINVAL;

		break;

	case DRM_TEGRA_GEM_TILING_MODE_TILED:
		mode = TEGRA_BO_TILING_MODE_TILED;

		if (args->value != 0)
			return -EINVAL;

		break;

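	/*
	 * For block-linear layouts the value is interpreted as the log2 of
	 * the block height, which the hardware limits to 5 (i.e. a block
	 * height of at most 32).
	 */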
	case DRM_TEGRA_GEM_TILING_MODE_BLOCK:
		mode = TEGRA_BO_TILING_MODE_BLOCK;

		if (args->value > 5)
			return -EINVAL;

		value = args->value;
		break;

	default:
		return -EINVAL;
	}

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	bo->tiling.mode = mode;
	bo->tiling.value = value;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_tiling(struct drm_device *drm, void *data,
				struct drm_file *file)
{
	struct drm_tegra_gem_get_tiling *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;
	int err = 0;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);

	switch (bo->tiling.mode) {
	case TEGRA_BO_TILING_MODE_PITCH:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_PITCH;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_TILED:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_TILED;
		args->value = 0;
		break;

	case TEGRA_BO_TILING_MODE_BLOCK:
		args->mode = DRM_TEGRA_GEM_TILING_MODE_BLOCK;
		args->value = bo->tiling.value;
		break;

	default:
		err = -EINVAL;
		break;
	}

	drm_gem_object_put(gem);

	return err;
}

static int tegra_gem_set_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_set_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	if (args->flags & ~DRM_TEGRA_GEM_FLAGS)
		return -EINVAL;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	bo->flags = 0;

	if (args->flags & DRM_TEGRA_GEM_BOTTOM_UP)
		bo->flags |= TEGRA_BO_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}

static int tegra_gem_get_flags(struct drm_device *drm, void *data,
			       struct drm_file *file)
{
	struct drm_tegra_gem_get_flags *args = data;
	struct drm_gem_object *gem;
	struct tegra_bo *bo;

	gem = drm_gem_object_lookup(file, args->handle);
	if (!gem)
		return -ENOENT;

	bo = to_tegra_bo(gem);
	args->flags = 0;

	if (bo->flags & TEGRA_BO_BOTTOM_UP)
		args->flags |= DRM_TEGRA_GEM_BOTTOM_UP;

	drm_gem_object_put(gem);

	return 0;
}
#endif

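/*
 * Note that all of the ioctls below are part of the staging UAPI and are
 * therefore only available when the driver is built with
 * CONFIG_DRM_TEGRA_STAGING enabled.
 */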
static const struct drm_ioctl_desc tegra_drm_ioctls[] = {
#ifdef CONFIG_DRM_TEGRA_STAGING
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_CREATE, tegra_gem_create,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_MMAP, tegra_gem_mmap,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_READ, tegra_syncpt_read,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_INCR, tegra_syncpt_incr,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SYNCPT_WAIT, tegra_syncpt_wait,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_OPEN_CHANNEL, tegra_open_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_CLOSE_CHANNEL, tegra_close_channel,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT, tegra_get_syncpt,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_SUBMIT, tegra_submit,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GET_SYNCPT_BASE, tegra_get_syncpt_base,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_TILING, tegra_gem_set_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_TILING, tegra_gem_get_tiling,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_SET_FLAGS, tegra_gem_set_flags,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(TEGRA_GEM_GET_FLAGS, tegra_gem_get_flags,
			  DRM_RENDER_ALLOW),
#endif
};

static const struct file_operations tegra_drm_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release,
	.unlocked_ioctl = drm_ioctl,
	.mmap = tegra_drm_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = drm_compat_ioctl,
	.llseek = noop_llseek,
};

static int tegra_drm_context_cleanup(int id, void *p, void *data)
{
	struct tegra_drm_context *context = p;

	tegra_drm_context_free(context);

	return 0;
}

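/*
 * Contexts that userspace did not close explicitly are reaped here when
 * the file descriptor is closed, using tegra_drm_context_cleanup() as the
 * IDR iterator callback.
 */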
static void tegra_drm_postclose(struct drm_device *drm, struct drm_file *file)
{
	struct tegra_drm_file *fpriv = file->driver_priv;

	mutex_lock(&fpriv->lock);
	idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
	mutex_unlock(&fpriv->lock);

	idr_destroy(&fpriv->contexts);
	mutex_destroy(&fpriv->lock);
	kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct drm_framebuffer *fb;

	mutex_lock(&drm->mode_config.fb_lock);

	list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
		seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
			   fb->base.id, fb->width, fb->height,
			   fb->format->depth,
			   fb->format->cpp[0] * 8,
			   drm_framebuffer_read_refcount(fb));
	}

	mutex_unlock(&drm->mode_config.fb_lock);

	return 0;
}

static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *)s->private;
	struct drm_device *drm = node->minor->dev;
	struct tegra_drm *tegra = drm->dev_private;
	struct drm_printer p = drm_seq_file_printer(s);

	if (tegra->domain) {
		mutex_lock(&tegra->mm_lock);
		drm_mm_print(&tegra->mm, &p);
		mutex_unlock(&tegra->mm_lock);
	}

	return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
	{ "framebuffers", tegra_debugfs_framebuffers, 0 },
	{ "iova", tegra_debugfs_iova, 0 },
};

static void tegra_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(tegra_debugfs_list,
				 ARRAY_SIZE(tegra_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif

static const struct drm_driver tegra_drm_driver = {
	.driver_features = DRIVER_MODESET | DRIVER_GEM |
			   DRIVER_ATOMIC | DRIVER_RENDER,
	.open = tegra_drm_open,
	.postclose = tegra_drm_postclose,
	.lastclose = drm_fb_helper_lastclose,

#if defined(CONFIG_DEBUG_FS)
	.debugfs_init = tegra_debugfs_init,
#endif

	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_import = tegra_gem_prime_import,

	.dumb_create = tegra_bo_dumb_create,

	.ioctls = tegra_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
	.fops = &tegra_drm_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

int tegra_drm_register_client(struct tegra_drm *tegra,
			      struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_add_tail(&client->list, &tegra->clients);
	client->drm = tegra;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int tegra_drm_unregister_client(struct tegra_drm *tegra,
				struct tegra_drm_client *client)
{
	mutex_lock(&tegra->clients_lock);
	list_del_init(&client->list);
	client->drm = NULL;
	mutex_unlock(&tegra->clients_lock);

	return 0;
}

int host1x_client_iommu_attach(struct host1x_client *client)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(client->dev);
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_group *group = NULL;
	int err;

	/*
	 * If the host1x client is already attached to an IOMMU domain that is
	 * not the shared IOMMU domain, don't try to attach it to a different
	 * domain. This allows using the IOMMU-backed DMA API.
	 */
	if (domain && domain != tegra->domain)
		return 0;

	if (tegra->domain) {
		group = iommu_group_get(client->dev);
		if (!group)
			return -ENODEV;

		if (domain != tegra->domain) {
			err = iommu_attach_group(tegra->domain, group);
			if (err < 0) {
				iommu_group_put(group);
				return err;
			}
		}

		tegra->use_explicit_iommu = true;
	}

	client->group = group;

	return 0;
}

void host1x_client_iommu_detach(struct host1x_client *client)
{
	struct drm_device *drm = dev_get_drvdata(client->host);
	struct tegra_drm *tegra = drm->dev_private;
	struct iommu_domain *domain;

	if (client->group) {
		/*
		 * Devices that are part of the same group may no longer be
		 * attached to a domain at this point because their group may
		 * have been detached by an earlier client.
		 */
		domain = iommu_get_domain_for_dev(client->dev);
		if (domain)
			iommu_detach_group(tegra->domain, client->group);

		iommu_group_put(client->group);
		client->group = NULL;
	}
}

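/*
 * tegra_drm_alloc() allocates from the "carveout" region: when an IOMMU
 * domain is available, the pages are mapped at an IO virtual address
 * allocated from the carveout IOVA domain; otherwise, the physical address
 * of the pages is handed back directly.
 */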
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size, dma_addr_t *dma)
{
	struct iova *alloc;
	void *virt;
	gfp_t gfp;
	int err;

	if (tegra->domain)
		size = iova_align(&tegra->carveout.domain, size);
	else
		size = PAGE_ALIGN(size);

	gfp = GFP_KERNEL | __GFP_ZERO;
	if (!tegra->domain) {
		/*
		 * Many units only support 32-bit addresses, even on 64-bit
		 * SoCs. If there is no IOMMU to translate into a 32-bit IO
		 * virtual address space, force allocations to be in the
		 * lower 32-bit range.
		 */
		gfp |= GFP_DMA;
	}

	virt = (void *)__get_free_pages(gfp, get_order(size));
	if (!virt)
		return ERR_PTR(-ENOMEM);

	if (!tegra->domain) {
		/*
		 * If IOMMU is disabled, devices address physical memory
		 * directly.
		 */
		*dma = virt_to_phys(virt);
		return virt;
	}

	alloc = alloc_iova(&tegra->carveout.domain,
			   size >> tegra->carveout.shift,
			   tegra->carveout.limit, true);
	if (!alloc) {
		err = -EBUSY;
		goto free_pages;