return 0;
}
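/*
 * Called when a file descriptor on the DRM device is closed: tear down any
 * channel contexts the file still holds and release the per-file state.
 */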
static void tegra_drm_preclose(struct drm_device *drm, struct drm_file *file)
{
struct tegra_drm_file *fpriv = file->driver_priv;
mutex_lock(&fpriv->lock);
idr_for_each(&fpriv->contexts, tegra_drm_context_cleanup, NULL);
mutex_unlock(&fpriv->lock);
idr_destroy(&fpriv->contexts);
mutex_destroy(&fpriv->lock);
kfree(fpriv);
}

#ifdef CONFIG_DEBUG_FS
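/* debugfs: list all framebuffers currently registered with the DRM device. */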
static int tegra_debugfs_framebuffers(struct seq_file *s, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_device *drm = node->minor->dev;
struct drm_framebuffer *fb;
mutex_lock(&drm->mode_config.fb_lock);
list_for_each_entry(fb, &drm->mode_config.fb_list, head) {
seq_printf(s, "%3d: user size: %d x %d, depth %d, %d bpp, refcount %d\n",
fb->base.id, fb->width, fb->height,
fb->format->depth,
fb->format->cpp[0] * 8,
drm_framebuffer_read_refcount(fb));
}
mutex_unlock(&drm->mode_config.fb_lock);
return 0;
}
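/* debugfs: dump the state of the IOVA carveout allocator. */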
static int tegra_debugfs_iova(struct seq_file *s, void *data)
{
struct drm_info_node *node = (struct drm_info_node *)s->private;
struct drm_device *drm = node->minor->dev;
struct tegra_drm *tegra = drm->dev_private;
struct drm_printer p = drm_seq_file_printer(s);
mutex_lock(&tegra->mm_lock);
drm_mm_print(&tegra->mm, &p);
mutex_unlock(&tegra->mm_lock);

return 0;
}

static struct drm_info_list tegra_debugfs_list[] = {
{ "framebuffers", tegra_debugfs_framebuffers, 0 },
{ "iova", tegra_debugfs_iova, 0 },
};
static int tegra_debugfs_init(struct drm_minor *minor)
{
return drm_debugfs_create_files(tegra_debugfs_list,
ARRAY_SIZE(tegra_debugfs_list),
minor->debugfs_root, minor);
}
#endif
static struct drm_driver tegra_drm_driver = {
.driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME |
DRIVER_ATOMIC,
.load = tegra_drm_load,
.unload = tegra_drm_unload,
.open = tegra_drm_open,
.preclose = tegra_drm_preclose,
.lastclose = tegra_drm_lastclose,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = tegra_debugfs_init,
#endif
.gem_free_object_unlocked = tegra_bo_free_object,
.gem_vm_ops = &tegra_bo_vm_ops,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = tegra_gem_prime_export,
.gem_prime_import = tegra_gem_prime_import,
.dumb_create = tegra_bo_dumb_create,
.dumb_map_offset = tegra_bo_dumb_map_offset,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = tegra_drm_ioctls,
.num_ioctls = ARRAY_SIZE(tegra_drm_ioctls),
.fops = &tegra_drm_fops,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
.date = DRIVER_DATE,
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
};
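/*
 * Track the host1x clients (gr2d, gr3d, ...) that attach to this Tegra DRM
 * instance.
 */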
int tegra_drm_register_client(struct tegra_drm *tegra,
struct tegra_drm_client *client)
{
mutex_lock(&tegra->clients_lock);
list_add_tail(&client->list, &tegra->clients);
mutex_unlock(&tegra->clients_lock);
return 0;
}
int tegra_drm_unregister_client(struct tegra_drm *tegra,
struct tegra_drm_client *client)
{
mutex_lock(&tegra->clients_lock);
list_del_init(&client->list);
mutex_unlock(&tegra->clients_lock);
return 0;
}
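/*
 * Allocate @size bytes for use by host1x clients. With an IOMMU present the
 * pages are mapped into the carveout IOVA range and @dma receives the IO
 * virtual address; without an IOMMU the allocation is forced into the lower
 * 32-bit range and @dma is the physical address. Returns the kernel virtual
 * address or an ERR_PTR()-encoded error.
 */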
void *tegra_drm_alloc(struct tegra_drm *tegra, size_t size,
dma_addr_t *dma)
{
struct iova *alloc;
void *virt;
gfp_t gfp;
int err;
if (tegra->domain)
size = iova_align(&tegra->carveout.domain, size);
else
size = PAGE_ALIGN(size);
gfp = GFP_KERNEL | __GFP_ZERO;
if (!tegra->domain) {
/*
* Many units only support 32-bit addresses, even on 64-bit
* SoCs. If there is no IOMMU to translate into a 32-bit IO
* virtual address space, force allocations to be in the
* lower 32-bit range.
*/
gfp |= GFP_DMA;
}
virt = (void *)__get_free_pages(gfp, get_order(size));
if (!virt)
return ERR_PTR(-ENOMEM);
if (!tegra->domain) {
/*
* If IOMMU is disabled, devices address physical memory
* directly.
*/
*dma = virt_to_phys(virt);
return virt;
}
alloc = alloc_iova(&tegra->carveout.domain,
size >> tegra->carveout.shift,
tegra->carveout.limit, true);
if (!alloc) {
err = -EBUSY;
goto free_pages;
}
*dma = iova_dma_addr(&tegra->carveout.domain, alloc);
err = iommu_map(tegra->domain, *dma, virt_to_phys(virt),
size, IOMMU_READ | IOMMU_WRITE);
if (err < 0)
goto free_iova;
return virt;
free_iova:
__free_iova(&tegra->carveout.domain, alloc);
free_pages:
free_pages((unsigned long)virt, get_order(size));
return ERR_PTR(err);
}
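/*
 * Counterpart to tegra_drm_alloc(): unmap the buffer and release its IOVA
 * range when an IOMMU domain is in use, then free the backing pages.
 */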
void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
dma_addr_t dma)
{
if (tegra->domain)
size = iova_align(&tegra->carveout.domain, size);
else
size = PAGE_ALIGN(size);
if (tegra->domain) {
iommu_unmap(tegra->domain, dma, size);
free_iova(&tegra->carveout.domain,
iova_pfn(&tegra->carveout.domain, dma));
}
free_pages((unsigned long)virt, get_order(size));
}
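/*
 * host1x bus glue: allocate and register the DRM device once the host1x core
 * has bound all of the subdevices listed in host1x_drm_subdevs.
 */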
static int host1x_drm_probe(struct host1x_device *dev)
{
struct drm_driver *driver = &tegra_drm_driver;
struct drm_device *drm;
int err;
drm = drm_dev_alloc(driver, &dev->dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
dev_set_drvdata(&dev->dev, drm);
err = drm_dev_register(drm, 0);
if (err < 0)
goto unref;
return 0;
unref:
drm_dev_unref(drm);
return err;
}

static int host1x_drm_remove(struct host1x_device *dev)
{
struct drm_device *drm = dev_get_drvdata(&dev->dev);

drm_dev_unregister(drm);
drm_dev_unref(drm);
return 0;
}
#ifdef CONFIG_PM_SLEEP
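/*
 * System sleep support: disable hotplug polling, suspend the fbdev console
 * and save the atomic display state so it can be restored on resume.
 */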
static int host1x_drm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct tegra_drm *tegra = drm->dev_private;
drm_kms_helper_poll_disable(drm);
tegra_drm_fb_suspend(drm);
tegra->state = drm_atomic_helper_suspend(drm);
if (IS_ERR(tegra->state)) {
tegra_drm_fb_resume(drm);
drm_kms_helper_poll_enable(drm);
return PTR_ERR(tegra->state);
}
return 0;
}
static int host1x_drm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
struct tegra_drm *tegra = drm->dev_private;
drm_atomic_helper_resume(drm, tegra->state);
tegra_drm_fb_resume(drm);
drm_kms_helper_poll_enable(drm);
return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(host1x_drm_pm_ops, host1x_drm_suspend,
host1x_drm_resume);
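/* Display controllers and host1x clients that form the composite DRM device. */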
static const struct of_device_id host1x_drm_subdevs[] = {
{ .compatible = "nvidia,tegra20-dc", },
{ .compatible = "nvidia,tegra20-hdmi", },
{ .compatible = "nvidia,tegra20-gr2d", },
{ .compatible = "nvidia,tegra30-dc", },
{ .compatible = "nvidia,tegra30-hdmi", },
{ .compatible = "nvidia,tegra30-gr2d", },
{ .compatible = "nvidia,tegra114-hdmi", },
{ .compatible = "nvidia,tegra124-dc", },
{ .compatible = "nvidia,tegra124-hdmi", },
{ .compatible = "nvidia,tegra124-dsi", },
{ .compatible = "nvidia,tegra132-dsi", },
{ .compatible = "nvidia,tegra210-dc", },
{ .compatible = "nvidia,tegra210-dsi", },
{ .compatible = "nvidia,tegra210-sor", },
{ .compatible = "nvidia,tegra210-sor1", },
{ /* sentinel */ }
};
static struct host1x_driver host1x_drm_driver = {
.driver = {
.name = "drm",
.probe = host1x_drm_probe,
.remove = host1x_drm_remove,
.subdevs = host1x_drm_subdevs,
};
static struct platform_driver * const drivers[] = {
&tegra_dc_driver,
&tegra_hdmi_driver,
&tegra_dsi_driver,
&tegra_dpaux_driver,
&tegra_sor_driver,
&tegra_gr2d_driver,
&tegra_gr3d_driver,
};

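/*
 * Register the host1x "drm" driver first, then the platform drivers for the
 * individual IP blocks; unwind in the opposite order on failure (and on
 * module exit).
 */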
static int __init host1x_drm_init(void)
{
int err;
err = host1x_driver_register(&host1x_drm_driver);
if (err < 0)
return err;
err = platform_register_drivers(drivers, ARRAY_SIZE(drivers));
if (err < 0)
goto unregister_host1x;
return 0;
unregister_host1x:
host1x_driver_unregister(&host1x_drm_driver);
return err;
}
module_init(host1x_drm_init);
static void __exit host1x_drm_exit(void)
{
platform_unregister_drivers(drivers, ARRAY_SIZE(drivers));
host1x_driver_unregister(&host1x_drm_driver);
}
module_exit(host1x_drm_exit);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
MODULE_DESCRIPTION("NVIDIA Tegra DRM driver");
MODULE_LICENSE("GPL v2");