/*
* Copyright (C) 2012 Avionic Design GmbH
* Copyright (C) 2012-2016 NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/bitops.h>
#include <linux/host1x.h>
#include <linux/idr.h>
#include <linux/iommu.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>

#include "drm.h"
#include "gem.h"
#define DRIVER_NAME "tegra"
#define DRIVER_DESC "NVIDIA Tegra graphics"
#define DRIVER_DATE "20120330"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
#define CARVEOUT_SZ SZ_64M
#define CDMA_GATHER_FETCHES_MAX_NB 16383
struct tegra_drm_file {
	struct idr contexts;
	struct mutex lock;
};
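
/*
 * Queue an atomic state for asynchronous completion: stash the state in the
 * per-device commit structure and kick the commit worker to apply it.
 */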
static void tegra_atomic_schedule(struct tegra_drm *tegra,
struct drm_atomic_state *state)
{
tegra->commit.state = state;
schedule_work(&tegra->commit.work);
}
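
/*
 * Apply a previously swapped-in atomic state to the hardware, wait for
 * vblank, clean up the old framebuffers and drop the state reference taken
 * by tegra_atomic_commit().
 */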
static void tegra_atomic_complete(struct tegra_drm *tegra,
struct drm_atomic_state *state)
{
struct drm_device *drm = tegra->drm;
/*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one condition: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
* supports it, which at least requires that the framebuffers get
* cleaned up with drm_atomic_helper_cleanup_planes()) or completed
* before the new state gets committed on the software side with
* drm_atomic_helper_swap_state().
*
* This scheme allows new atomic state updates to be prepared and
* checked in parallel to the asynchronous completion of the previous
* update. Which is important since compositors need to figure out the
* composition of the next frame right after having submitted the
* current layout.
*/
drm_atomic_helper_commit_modeset_disables(drm, state);
drm_atomic_helper_commit_modeset_enables(drm, state);
drm_atomic_helper_commit_planes(drm, state,
DRM_PLANE_COMMIT_ACTIVE_ONLY);
drm_atomic_helper_wait_for_vblanks(drm, state);
drm_atomic_helper_cleanup_planes(drm, state);
drm_atomic_state_put(state);
}
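
/* Work item callback that runs a scheduled nonblocking commit to completion. */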
static void tegra_atomic_work(struct work_struct *work)
{
struct tegra_drm *tegra = container_of(work, struct tegra_drm,
commit.work);
tegra_atomic_complete(tegra, tegra->commit.state);
}
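
/*
 * Driver-specific .atomic_commit implementation: prepare the planes, wait
 * for any outstanding nonblocking commit, swap in the new state and then
 * either schedule completion on the worker (nonblocking) or complete the
 * commit directly (blocking).
 */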
static int tegra_atomic_commit(struct drm_device *drm,
struct drm_atomic_state *state, bool nonblock)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
err = drm_atomic_helper_prepare_planes(drm, state);
if (err)
return err;
/* serialize outstanding nonblocking commits */
mutex_lock(&tegra->commit.lock);
flush_work(&tegra->commit.work);
	/*
	 * This is the point of no return - everything below never fails,
	 * except when the hardware is terminally wedged. Which means we can
	 * commit the new state on the software side now.
	 */
err = drm_atomic_helper_swap_state(state, true);
if (err) {
mutex_unlock(&tegra->commit.lock);
drm_atomic_helper_cleanup_planes(drm, state);
return err;
}
drm_atomic_state_get(state);
	if (nonblock)
		tegra_atomic_schedule(tegra, state);
	else
		tegra_atomic_complete(tegra, state);
mutex_unlock(&tegra->commit.lock);
return 0;
}
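
/*
 * Mode configuration callbacks: framebuffer creation is driver-specific,
 * while atomic check uses the generic helper and atomic commit uses the
 * nonblocking-capable implementation above.
 */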
static const struct drm_mode_config_funcs tegra_drm_mode_funcs = {
.fb_create = tegra_fb_create,
#ifdef CONFIG_DRM_FBDEV_EMULATION
.output_poll_changed = tegra_fb_output_poll_changed,
#endif
.atomic_check = drm_atomic_helper_check,
	.atomic_commit = tegra_atomic_commit,
};
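
/*
 * One-time driver initialization: set up the IOMMU-backed GEM and carveout
 * regions (if an IOMMU is available), initialize mode setting and fbdev
 * emulation, and bring up the host1x client devices.
 */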
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		/*
		 * Split the IOMMU aperture: the bottom part is handed to the
		 * GEM allocator, while the top CARVEOUT_SZ bytes are reserved
		 * as a carveout for mappings not backed by GEM objects.
		 */
		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order,
				 carveout_end >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}
mutex_init(&tegra->clients_lock);
INIT_LIST_HEAD(&tegra->clients);
mutex_init(&tegra->commit.lock);
INIT_WORK(&tegra->commit.work, tegra_atomic_work);
drm->dev_private = tegra;
tegra->drm = drm;
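
	/* limits enforced by the DRM core on modes and framebuffers */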
drm->mode_config.min_width = 0;
drm->mode_config.min_height = 0;
drm->mode_config.max_width = 4096;
drm->mode_config.max_height = 4096;
drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.funcs = &tegra_drm_mode_funcs;
	err = tegra_drm_fb_prepare(drm);
	if (err < 0)
		goto config;

	drm_kms_helper_poll_init(drm);
err = host1x_device_init(device);