drm-snapshot: Changes to 'upstream-experimental'
configure.ac | 9
libdrm/Makefile.am | 6
libdrm/nouveau/libdrm_nouveau.pc.in | 2
libdrm/nouveau/nouveau_bo.c | 187 +-
libdrm/nouveau/nouveau_bo.h | 22
libdrm/nouveau/nouveau_device.c | 6
libdrm/nouveau/nouveau_device.h | 2
libdrm/nouveau/nouveau_drmif.h | 3
libdrm/nouveau/nouveau_private.h | 1
libdrm/nouveau/nouveau_pushbuf.c | 4
libdrm/nouveau/nouveau_pushbuf.h | 23
libdrm/radeon/Makefile.am | 53
libdrm/radeon/libdrm_radeon.pc.in | 10
libdrm/radeon/radeon_bo.h | 179 ++
libdrm/radeon/radeon_bo_gem.c | 254 +++
libdrm/radeon/radeon_bo_gem.h | 42
libdrm/radeon/radeon_cs.h | 208 ++
libdrm/radeon/radeon_cs_gem.c | 551 +++++++
libdrm/radeon/radeon_cs_gem.h | 41
libdrm/radeon/radeon_track.c | 140 +
libdrm/radeon/radeon_track.h | 64
linux-core/Makefile | 2
linux-core/Makefile.kernel | 7
linux-core/drmP.h | 21
linux-core/drm_agpsupport.c | 171 --
linux-core/drm_bo.c | 2730 ------------------------------------
linux-core/drm_bo_lock.c | 189 --
linux-core/drm_bo_move.c | 614 --------
linux-core/drm_compat.c | 99 -
linux-core/drm_drv.c | 36
linux-core/drm_fence.c | 829 ----------
linux-core/drm_fops.c | 35
linux-core/drm_object.c | 294 ---
linux-core/drm_objects.h | 821 ----------
linux-core/drm_os_linux.h | 8
linux-core/drm_proc.c | 102 -
linux-core/drm_stub.c | 14
linux-core/drm_sysfs.c | 8
linux-core/drm_ttm.c | 512 ------
linux-core/drm_vm.c | 198 --
linux-core/nouveau_backlight.c | 93 +
linux-core/via_buffer.c | 163 --
linux-core/via_fence.c | 169 --
linux-core/xgi_cmdlist.c | 3
linux-core/xgi_drv.c | 11
linux-core/xgi_drv.h | 12
linux-core/xgi_fence.c | 87 -
shared-core/drm.h | 318 ----
shared-core/i915_drm.h | 52
shared-core/nouveau_drm.h | 35
shared-core/nouveau_drv.h | 2
shared-core/nouveau_reg.h | 4
shared-core/nv50_graph.c | 1
shared-core/radeon_drm.h | 131 +
shared-core/via_drv.c | 40
shared-core/via_drv.h | 14
shared-core/via_map.c | 10
tests/modetest/modetest.c | 2
58 files changed, 2002 insertions(+), 7642 deletions(-)
New commits:
commit de1ed01214874dcdd6116ff2587c8710d6ed4d2d
Author: Dave Airlie <airlied@redhat.com>
Date: Tue Jun 30 12:19:28 2009 +1000
radeon: add support for write followed by read relocs.
the DDX does this and used to handle it internally
diff --git a/libdrm/radeon/radeon_cs_gem.c b/libdrm/radeon/radeon_cs_gem.c
index b9f6f4b..82ef16c 100644
--- a/libdrm/radeon/radeon_cs_gem.c
+++ b/libdrm/radeon/radeon_cs_gem.c
@@ -144,12 +144,19 @@ static int cs_gem_write_reloc(struct radeon_cs *cs,
* domain set then the read_domain should also be set for this
* new relocation.
*/
- if (reloc->read_domain && !read_domain) {
- return -EINVAL;
- }
- if (reloc->write_domain && !write_domain) {
- return -EINVAL;
+ /* the DDX expects to read and write from same pixmap */
+ if (write_domain && (reloc->read_domain & write_domain)) {
+ reloc->read_domain = 0;
+ reloc->write_domain = write_domain;
+ } else if (read_domain & reloc->write_domain) {
+ reloc->read_domain = 0;
+ } else {
+ if (write_domain != reloc->write_domain)
+ return -EINVAL;
+ if (read_domain != reloc->read_domain)
+ return -EINVAL;
}
+
reloc->read_domain |= read_domain;
reloc->write_domain |= write_domain;
/* update flags */
commit 790097c51330090b2b7b90429b9ab8ddf259fd8e
Author: Dave Airlie <airlied@redhat.com>
Date: Tue Jun 23 09:51:05 2009 +1000
remove some old ttm bits
diff --git a/shared-core/i915_drm.h b/shared-core/i915_drm.h
index c8fec5f..2539966 100644
--- a/shared-core/i915_drm.h
+++ b/shared-core/i915_drm.h
@@ -395,58 +395,6 @@ typedef struct drm_i915_hws_addr {
uint64_t addr;
} drm_i915_hws_addr_t;
-/*
- * Relocation header is 4 uint32_ts
- * 0 - 32 bit reloc count
- * 1 - 32-bit relocation type
- * 2-3 - 64-bit user buffer handle ptr for another list of relocs.
- */
-#define I915_RELOC_HEADER 4
-
-/*
- * type 0 relocation has 4-uint32_t stride
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer handle
- * 3 - reserved (for optimisations later).
- */
-/*
- * type 1 relocation has 4-uint32_t stride.
- * Hangs off the first item in the op list.
- * Performed after all valiations are done.
- * Try to group relocs into the same relocatee together for
- * performance reasons.
- * 0 - offset into buffer
- * 1 - delta to add in
- * 2 - buffer index in op list.
- * 3 - relocatee index in op list.
- */
-#define I915_RELOC_TYPE_0 0
-#define I915_RELOC0_STRIDE 4
-#define I915_RELOC_TYPE_1 1
-#define I915_RELOC1_STRIDE 4
-
-
-struct drm_i915_op_arg {
- uint64_t next;
- uint64_t reloc_ptr;
- int handled;
- unsigned int pad64;
- union {
- struct drm_bo_op_req req;
- struct drm_bo_arg_rep rep;
- } d;
-
-};
-
-struct drm_i915_execbuffer {
- uint64_t ops_list;
- uint32_t num_buffers;
- struct drm_i915_batchbuffer batch;
- drm_context_t context; /* for lockless use in the future */
- struct drm_fence_arg fence_arg;
-};
-
struct drm_i915_gem_init {
/**
* Beginning offset in the GTT to be managed by the DRM memory
commit 9a33f62be1c478334572ea9384af60c37d1644a0
Author: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
Date: Mon Jun 22 23:07:26 2009 +0200
drm: Strip old ttm.
Signed-off-by: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
diff --git a/linux-core/Makefile b/linux-core/Makefile
index 9257a49..591b6cf 100644
--- a/linux-core/Makefile
+++ b/linux-core/Makefile
@@ -58,7 +58,7 @@ endif
# Modules for all architectures
MODULE_LIST := drm.o tdfx.o r128.o radeon.o mga.o sis.o savage.o via.o \
- mach64.o nouveau.o xgi.o
+ mach64.o xgi.o
# Modules only for ix86 architectures
ifneq (,$(findstring 86,$(MACHINE)))
diff --git a/linux-core/Makefile.kernel b/linux-core/Makefile.kernel
index 0bea35d..e83f15c 100644
--- a/linux-core/Makefile.kernel
+++ b/linux-core/Makefile.kernel
@@ -12,9 +12,8 @@ drm-objs := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
drm_sysfs.o drm_pci.o drm_agpsupport.o drm_scatter.o \
drm_memory_debug.o ati_pcigart.o drm_sman.o \
- drm_hashtab.o drm_mm.o drm_object.o drm_compat.o \
- drm_fence.o drm_ttm.o drm_bo.o drm_bo_move.o drm_bo_lock.o \
- drm_regman.o drm_vm_nopage_compat.o drm_gem.o
+ drm_hashtab.o drm_mm.o drm_compat.o \
+ drm_vm_nopage_compat.o drm_gem.o
tdfx-objs := tdfx_drv.o
r128-objs := r128_drv.o r128_cce.o r128_state.o r128_irq.o
mga-objs := mga_drv.o mga_dma.o mga_state.o mga_warp.o mga_irq.o
@@ -35,7 +34,7 @@ sis-objs := sis_drv.o sis_mm.o
ffb-objs := ffb_drv.o ffb_context.o
savage-objs := savage_drv.o savage_bci.o savage_state.o
via-objs := via_irq.o via_drv.o via_map.o via_mm.o via_dma.o via_verifier.o \
- via_video.o via_dmablit.o via_fence.o via_buffer.o
+ via_video.o via_dmablit.o
mach64-objs := mach64_drv.o mach64_dma.o mach64_irq.o mach64_state.o
xgi-objs := xgi_cmdlist.o xgi_drv.o xgi_fb.o xgi_misc.o xgi_pcie.o \
xgi_fence.o
diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 6770282..9b4b071 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -155,8 +155,6 @@ typedef unsigned long uintptr_t;
#define DRM_MEM_MM 22
#define DRM_MEM_HASHTAB 23
#define DRM_MEM_OBJECTS 24
-#define DRM_MEM_FENCE 25
-#define DRM_MEM_TTM 26
#define DRM_MEM_BUFOBJ 27
#define DRM_MAX_CTXBITMAP (PAGE_SIZE * 8)
@@ -660,8 +658,6 @@ struct drm_gem_object {
void *driver_private;
};
-#include "drm_objects.h"
-
/**
* DRM driver structure. This structure represent the common code for
* a family of cards. There will one drm_device for each card present
@@ -855,10 +851,6 @@ struct drm_device {
struct list_head maplist; /**< Linked list of regions */
int map_count; /**< Number of mappable regions */
struct drm_open_hash map_hash; /**< User token hash table for maps */
- struct drm_mm offset_manager; /**< User token manager */
- struct drm_open_hash object_hash; /**< User token hash table for objects */
- struct address_space *dev_mapping; /**< For unmap_mapping_range() */
- struct page *ttm_dummy_page;
/** \name Context handle management */
/*@{ */
@@ -953,9 +945,6 @@ struct drm_device {
unsigned int agp_buffer_token;
struct drm_minor *primary; /**< render type primary screen head */
- struct drm_fence_manager fm;
- struct drm_buffer_manager bm;
-
/** \name Drawable information */
/*@{ */
spinlock_t drw_lock;
@@ -978,15 +967,6 @@ struct drm_device {
/*@} */
};
-#if __OS_HAS_AGP
-struct drm_agp_ttm_backend {
- struct drm_ttm_backend backend;
- DRM_AGP_MEM *mem;
- struct agp_bridge_data *bridge;
- int populated;
-};
-#endif
-
static __inline__ int drm_core_check_feature(struct drm_device *dev,
int feature)
@@ -1279,7 +1259,6 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
extern void drm_agp_chipset_flush(struct drm_device *dev);
/* Stub support (drm_stub.h) */
extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 2abfac6..9f746a3 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -498,177 +498,6 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
-/*
- * AGP ttm backend interface.
- */
-
-#ifndef AGP_USER_TYPES
-#define AGP_USER_TYPES (1 << 16)
-#define AGP_USER_MEMORY (AGP_USER_TYPES)
-#define AGP_USER_CACHED_MEMORY (AGP_USER_TYPES + 1)
-#endif
-#define AGP_REQUIRED_MAJOR 0
-#define AGP_REQUIRED_MINOR 102
-
-static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend)
-{
- return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ? 0 : 1);
-}
-
-
-static int drm_agp_populate(struct drm_ttm_backend *backend,
- unsigned long num_pages, struct page **pages,
- struct page *dummy_read_page)
-{
- struct drm_agp_ttm_backend *agp_be =
- container_of(backend, struct drm_agp_ttm_backend, backend);
- struct page **cur_page, **last_page = pages + num_pages;
- DRM_AGP_MEM *mem;
- int dummy_page_count = 0;
-
- if (drm_alloc_memctl(num_pages * sizeof(void *)))
- return -1;
-
- DRM_DEBUG("drm_agp_populate_ttm\n");
- mem = drm_agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
- if (!mem) {
- drm_free_memctl(num_pages * sizeof(void *));
- return -1;
- }
-
- DRM_DEBUG("Current page count is %ld\n", (long) mem->page_count);
- mem->page_count = 0;
- for (cur_page = pages; cur_page < last_page; ++cur_page) {
- struct page *page = *cur_page;
- if (!page) {
- page = dummy_read_page;
- ++dummy_page_count;
- }
- mem->memory[mem->page_count++] = phys_to_gart(page_to_phys(page));
- }
- if (dummy_page_count)
- DRM_DEBUG("Mapped %d dummy pages\n", dummy_page_count);
- agp_be->mem = mem;
- return 0;
-}
-
-static int drm_agp_bind_ttm(struct drm_ttm_backend *backend,
- struct drm_bo_mem_reg *bo_mem)
-{
- struct drm_agp_ttm_backend *agp_be =
- container_of(backend, struct drm_agp_ttm_backend, backend);
- DRM_AGP_MEM *mem = agp_be->mem;
- int ret;
- int snooped = (bo_mem->flags & DRM_BO_FLAG_CACHED) && !(bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED);
-
- DRM_DEBUG("drm_agp_bind_ttm\n");
- mem->is_flushed = true;
- mem->type = AGP_USER_MEMORY;
- /* CACHED MAPPED implies not snooped memory */
- if (snooped)
- mem->type = AGP_USER_CACHED_MEMORY;
-
- ret = drm_agp_bind_memory(mem, bo_mem->mm_node->start);
- if (ret)
- DRM_ERROR("AGP Bind memory failed\n");
-
- DRM_FLAG_MASKED(backend->flags, (bo_mem->flags & DRM_BO_FLAG_CACHED) ?
- DRM_BE_FLAG_BOUND_CACHED : 0,
- DRM_BE_FLAG_BOUND_CACHED);
- return ret;
-}
-
-static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend)
-{
- struct drm_agp_ttm_backend *agp_be =
- container_of(backend, struct drm_agp_ttm_backend, backend);
-
- DRM_DEBUG("drm_agp_unbind_ttm\n");
- if (agp_be->mem->is_bound)
- return drm_agp_unbind_memory(agp_be->mem);
- else
- return 0;
-}
-
-static void drm_agp_clear_ttm(struct drm_ttm_backend *backend)
-{
- struct drm_agp_ttm_backend *agp_be =
- container_of(backend, struct drm_agp_ttm_backend, backend);
- DRM_AGP_MEM *mem = agp_be->mem;
-
- DRM_DEBUG("drm_agp_clear_ttm\n");
- if (mem) {
- unsigned long num_pages = mem->page_count;
- backend->func->unbind(backend);
- agp_free_memory(mem);
- drm_free_memctl(num_pages * sizeof(void *));
- }
- agp_be->mem = NULL;
-}
-
-static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend)
-{
- struct drm_agp_ttm_backend *agp_be;
-
- if (backend) {
- DRM_DEBUG("drm_agp_destroy_ttm\n");
- agp_be = container_of(backend, struct drm_agp_ttm_backend, backend);
- if (agp_be) {
- if (agp_be->mem)
- backend->func->clear(backend);
- drm_ctl_free(agp_be, sizeof(*agp_be), DRM_MEM_TTM);
- }
- }
-}
-
-static struct drm_ttm_backend_func agp_ttm_backend = {
- .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust,
- .populate = drm_agp_populate,
- .clear = drm_agp_clear_ttm,
- .bind = drm_agp_bind_ttm,
- .unbind = drm_agp_unbind_ttm,
- .destroy = drm_agp_destroy_ttm,
-};
-
-struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev)
-{
-
- struct drm_agp_ttm_backend *agp_be;
- struct agp_kern_info *info;
-
- if (!dev->agp) {
- DRM_ERROR("AGP is not initialized.\n");
- return NULL;
- }
- info = &dev->agp->agp_info;
-
- if (info->version.major != AGP_REQUIRED_MAJOR ||
- info->version.minor < AGP_REQUIRED_MINOR) {
- DRM_ERROR("Wrong agpgart version %d.%d\n"
- "\tYou need at least version %d.%d.\n",
- info->version.major,
- info->version.minor,
- AGP_REQUIRED_MAJOR,
- AGP_REQUIRED_MINOR);
- return NULL;
- }
-
-
- agp_be = drm_ctl_calloc(1, sizeof(*agp_be), DRM_MEM_TTM);
- if (!agp_be)
- return NULL;
-
- agp_be->mem = NULL;
-
- agp_be->bridge = dev->agp->bridge;
- agp_be->populated = false;
- agp_be->backend.func = &agp_ttm_backend;
- agp_be->backend.dev = dev;
-
- return &agp_be->backend;
-}
-EXPORT_SYMBOL(drm_agp_init_ttm);
-
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
void drm_agp_chipset_flush(struct drm_device *dev)
{
diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c
deleted file mode 100644
index f43480c..0000000
--- a/linux-core/drm_bo.c
+++ /dev/null
@@ -1,2730 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include "drmP.h"
-
-/*
- * Locking may look a bit complicated but isn't really:
- *
- * The buffer usage atomic_t needs to be protected by dev->struct_mutex
- * when there is a chance that it can be zero before or after the operation.
- *
- * dev->struct_mutex also protects all lists and list heads,
- * Hash tables and hash heads.
- *
- * bo->mutex protects the buffer object itself excluding the usage field.
- * bo->mutex does also protect the buffer list heads, so to manipulate those,
- * we need both the bo->mutex and the dev->struct_mutex.
- *
- * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
- * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
- * the list traversal will, in general, need to be restarted.
- *
- */
-
-static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
-static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
-static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
-
-static inline uint64_t drm_bo_type_flags(unsigned type)
-{
- return (1ULL << (24 + type));
-}
-
-/*
- * bo locked. dev->struct_mutex locked.
- */
-
-void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
-{
- struct drm_mem_type_manager *man;
-
- DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
- DRM_ASSERT_LOCKED(&bo->mutex);
-
- man = &bo->dev->bm.man[bo->pinned_mem_type];
- list_add_tail(&bo->pinned_lru, &man->pinned);
-}
-
-void drm_bo_add_to_lru(struct drm_buffer_object *bo)
-{
- struct drm_mem_type_manager *man;
-
- DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-
- if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
- || bo->mem.mem_type != bo->pinned_mem_type) {
- man = &bo->dev->bm.man[bo->mem.mem_type];
- list_add_tail(&bo->lru, &man->lru);
- } else {
- INIT_LIST_HEAD(&bo->lru);
- }
-}
-
-static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
-{
- if (!bo->map_list.map)
- return 0;
-
- drm_bo_unmap_virtual(bo);
- return 0;
-}
-
-/*
- * Call bo->mutex locked.
- */
-
-static int drm_bo_add_ttm(struct drm_buffer_object *bo)
-{
- struct drm_device *dev = bo->dev;
- int ret = 0;
- uint32_t page_flags = 0;
-
- DRM_ASSERT_LOCKED(&bo->mutex);
- bo->ttm = NULL;
-
- if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
- page_flags |= DRM_TTM_PAGE_WRITE;
-
- switch (bo->type) {
- case drm_bo_type_device:
- case drm_bo_type_kernel:
- bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
- page_flags, dev->bm.dummy_read_page);
- if (!bo->ttm)
- ret = -ENOMEM;
- break;
- case drm_bo_type_user:
- bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
- page_flags | DRM_TTM_PAGE_USER,
- dev->bm.dummy_read_page);
- if (!bo->ttm)
- ret = -ENOMEM;
-
- ret = drm_ttm_set_user(bo->ttm, current,
- bo->buffer_start,
- bo->num_pages);
- if (ret)
- return ret;
-
- break;
- default:
- DRM_ERROR("Illegal buffer object type\n");
- ret = -EINVAL;
- break;
- }
-
- return ret;
-}
-
-static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
- struct drm_bo_mem_reg *mem,
- int evict, int no_wait)
-{
- struct drm_device *dev = bo->dev;
- struct drm_buffer_manager *bm = &dev->bm;
- int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
- int new_is_pci = drm_mem_reg_is_pci(dev, mem);
- struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
- struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
- int ret = 0;
-
- if (old_is_pci || new_is_pci ||
- ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
- ret = drm_bo_vm_pre_move(bo, old_is_pci);
- if (ret)
- return ret;
-
- /*
- * Create and bind a ttm if required.
- */
-
- if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
- ret = drm_bo_add_ttm(bo);
- if (ret)
- goto out_err;
-
- if (mem->mem_type != DRM_BO_MEM_LOCAL) {
- ret = drm_ttm_bind(bo->ttm, mem);
- if (ret)
- goto out_err;
- }
-
- if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
-
- struct drm_bo_mem_reg *old_mem = &bo->mem;
- uint64_t save_flags = old_mem->flags;
- uint64_t save_proposed_flags = old_mem->proposed_flags;
-
- *old_mem = *mem;
- mem->mm_node = NULL;
- old_mem->proposed_flags = save_proposed_flags;
- DRM_FLAG_MASKED(save_flags, mem->flags,
- DRM_BO_MASK_MEMTYPE);
- goto moved;
- }
-
- }
-
- if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
- !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))
- ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
- else if (dev->driver->bo_driver->move)
- ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
- else
- ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
-
- if (ret)
- goto out_err;
-
-moved:
- if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
- ret =
- dev->driver->bo_driver->invalidate_caches(dev,
- bo->mem.flags);
- if (ret)
- DRM_ERROR("Can not flush read caches\n");
- }
-
- DRM_FLAG_MASKED(bo->priv_flags,
- (evict) ? _DRM_BO_FLAG_EVICTED : 0,
- _DRM_BO_FLAG_EVICTED);
-
- if (bo->mem.mm_node)
- bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
- bm->man[bo->mem.mem_type].gpu_offset;
-
-
- return 0;
-
-out_err:
- new_man = &bm->man[bo->mem.mem_type];
- if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
- drm_ttm_unbind(bo->ttm);
- drm_ttm_destroy(bo->ttm);
- bo->ttm = NULL;
- }
-
- return ret;
-}
-
-/*
- * Call bo->mutex locked.
- * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
- */
-
-static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
-{
- struct drm_fence_object *fence = bo->fence;
-
- if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
- return -EBUSY;
-
- if (fence) {
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(&bo->fence);
- return 0;
- }
- drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
- if (drm_fence_object_signaled(fence, bo->fence_type)) {
- drm_fence_usage_deref_unlocked(&bo->fence);
- return 0;
- }
- return -EBUSY;
- }
- return 0;
-}
-
-static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
-{
- int ret;
-
- mutex_lock(&bo->mutex);
- ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
- mutex_unlock(&bo->mutex);
- return ret;
-}
-
-
-/*
- * Call bo->mutex locked.
- * Wait until the buffer is idle.
- */
-
-int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
- int no_wait, int check_unfenced)
-{
- int ret;
-
- DRM_ASSERT_LOCKED(&bo->mutex);
- while(unlikely(drm_bo_busy(bo, check_unfenced))) {
- if (no_wait)
- return -EBUSY;
-
- if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
- mutex_unlock(&bo->mutex);
- wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
- mutex_lock(&bo->mutex);
- bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
- }
-
- if (bo->fence) {
- struct drm_fence_object *fence;
- uint32_t fence_type = bo->fence_type;
-
- drm_fence_reference_unlocked(&fence, bo->fence);
- mutex_unlock(&bo->mutex);
-
- ret = drm_fence_object_wait(fence, lazy, !interruptible,
- fence_type);
-
- drm_fence_usage_deref_unlocked(&fence);
- mutex_lock(&bo->mutex);
- bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
- if (ret)
- return ret;
- }
-
- }
- return 0;
-}
-EXPORT_SYMBOL(drm_bo_wait);
-
-static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
-{
- struct drm_device *dev = bo->dev;
- struct drm_buffer_manager *bm = &dev->bm;
-
- if (bo->fence) {
- if (bm->nice_mode) {
- unsigned long _end = jiffies + 3 * DRM_HZ;
- int ret;
- do {
- ret = drm_bo_wait(bo, 0, 0, 0, 0);
- if (ret && allow_errors)
- return ret;
-
- } while (ret && !time_after_eq(jiffies, _end));
-
- if (bo->fence) {
- bm->nice_mode = 0;
- DRM_ERROR("Detected GPU lockup or "
- "fence driver was taken down. "
- "Evicting buffer.\n");
- }
- }
- if (bo->fence)
- drm_fence_usage_deref_unlocked(&bo->fence);
- }
- return 0;
-}
-
-/*
- * Call dev->struct_mutex locked.
- * Attempts to remove all private references to a buffer by expiring its
- * fence object and removing from lru lists and memory managers.
- */
-
-static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
-{
- struct drm_device *dev = bo->dev;
- struct drm_buffer_manager *bm = &dev->bm;
-
- DRM_ASSERT_LOCKED(&dev->struct_mutex);
-
- atomic_inc(&bo->usage);
- mutex_unlock(&dev->struct_mutex);
- mutex_lock(&bo->mutex);
-
- DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-
- if (bo->fence && drm_fence_object_signaled(bo->fence,
- bo->fence_type))
- drm_fence_usage_deref_unlocked(&bo->fence);
-
- if (bo->fence && remove_all)
- (void)drm_bo_expire_fence(bo, 0);
-
- mutex_lock(&dev->struct_mutex);
-
- if (!atomic_dec_and_test(&bo->usage))
- goto out;
-
- if (!bo->fence) {
- list_del_init(&bo->lru);
- if (bo->mem.mm_node) {
- drm_mm_put_block(bo->mem.mm_node);
- if (bo->pinned_node == bo->mem.mm_node)
- bo->pinned_node = NULL;
- bo->mem.mm_node = NULL;
- }
- list_del_init(&bo->pinned_lru);
- if (bo->pinned_node) {
- drm_mm_put_block(bo->pinned_node);
- bo->pinned_node = NULL;
- }
- list_del_init(&bo->ddestroy);
- mutex_unlock(&bo->mutex);
- drm_bo_destroy_locked(bo);
- return;
- }
-
- if (list_empty(&bo->ddestroy)) {
- drm_fence_object_flush(bo->fence, bo->fence_type);
- list_add_tail(&bo->ddestroy, &bm->ddestroy);
- schedule_delayed_work(&bm->wq,
- ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
- }
-
-out:
- mutex_unlock(&bo->mutex);
- return;
-}
-
-/*
- * Verify that refcount is 0 and that there are no internal references
- * to the buffer object. Then destroy it.
- */
-
-static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
-{
- struct drm_device *dev = bo->dev;
- struct drm_buffer_manager *bm = &dev->bm;
-
- DRM_ASSERT_LOCKED(&dev->struct_mutex);
-
- if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
- list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
- list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
- if (bo->fence != NULL) {
- DRM_ERROR("Fence was non-zero.\n");
- drm_bo_cleanup_refs(bo, 0);
- return;
- }
-
- if (bo->ttm) {
- drm_ttm_unbind(bo->ttm);
- drm_ttm_destroy(bo->ttm);
- bo->ttm = NULL;
- }
-
- atomic_dec(&bm->count);
-
- drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
-
- return;
- }
-
- /*
- * Some stuff is still trying to reference the buffer object.
- * Get rid of those references.
- */
-
- drm_bo_cleanup_refs(bo, 0);
-
- return;
-}
-
-/*
- * Call dev->struct_mutex locked.
- */
-
-static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
-{
- struct drm_buffer_manager *bm = &dev->bm;
-
- struct drm_buffer_object *entry, *nentry;
- struct list_head *list, *next;
-
- list_for_each_safe(list, next, &bm->ddestroy) {
- entry = list_entry(list, struct drm_buffer_object, ddestroy);
-
- nentry = NULL;
- if (next != &bm->ddestroy) {
- nentry = list_entry(next, struct drm_buffer_object,
- ddestroy);
- atomic_inc(&nentry->usage);
- }
-
- drm_bo_cleanup_refs(entry, remove_all);
-
- if (nentry)
- atomic_dec(&nentry->usage);
- }
-}
-
-static void drm_bo_delayed_workqueue(struct work_struct *work)
-{
- struct drm_buffer_manager *bm =
- container_of(work, struct drm_buffer_manager, wq.work);
- struct drm_device *dev = container_of(bm, struct drm_device, bm);
-
- DRM_DEBUG("Delayed delete Worker\n");
-
- mutex_lock(&dev->struct_mutex);
- if (!bm->initialized) {
- mutex_unlock(&dev->struct_mutex);
- return;
- }
- drm_bo_delayed_delete(dev, 0);
- if (bm->initialized && !list_empty(&bm->ddestroy)) {
- schedule_delayed_work(&bm->wq,
- ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
- }
- mutex_unlock(&dev->struct_mutex);
-}
-
-void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
-{
- struct drm_buffer_object *tmp_bo = *bo;
- bo = NULL;
-
- DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
-
- if (atomic_dec_and_test(&tmp_bo->usage))
- drm_bo_destroy_locked(tmp_bo);
-}
-EXPORT_SYMBOL(drm_bo_usage_deref_locked);
-
-static void drm_bo_base_deref_locked(struct drm_file *file_priv,
- struct drm_user_object *uo)
-{
- struct drm_buffer_object *bo =
- drm_user_object_entry(uo, struct drm_buffer_object, base);
-
- DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-
- drm_bo_takedown_vm_locked(bo);
- drm_bo_usage_deref_locked(&bo);
-}
-
-void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
-{
Reply to: