libdrm: Changes to 'upstream-unstable'



 configure.ac                    |    2 
 libdrm/intel/intel_bufmgr_gem.c |   92 +++++++++++++---------------------------
 libdrm/intel/intel_chipset.h    |   10 +++-
 libdrm/radeon/radeon_cs_gem.c   |   12 ++---
 libdrm/xf86drmMode.c            |    3 +
 5 files changed, 48 insertions(+), 71 deletions(-)

New commits:
commit ac71f0849928f4b2fbb69c01304ac6f9df8916a1
Author: Eric Anholt <eric@anholt.net>
Date:   Mon Sep 21 15:29:58 2009 -0700

    Bump to 2.4.14 for release.

diff --git a/configure.ac b/configure.ac
index 425417e..d707052 100644
--- a/configure.ac
+++ b/configure.ac
@@ -19,7 +19,7 @@
 #  CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 
 AC_PREREQ(2.60)
-AC_INIT([libdrm], 2.4.13, [dri-devel@lists.sourceforge.net], libdrm)
+AC_INIT([libdrm], 2.4.14, [dri-devel@lists.sourceforge.net], libdrm)
 AC_USE_SYSTEM_EXTENSIONS
 AC_CONFIG_SRCDIR([Makefile.am])
 AM_INIT_AUTOMAKE([dist-bzip2])

commit 51b89733c53458b6827f0db99eb46a20fa1c7020
Author: Eric Anholt <eric@anholt.net>
Date:   Sun Sep 6 23:17:14 2009 -0700

    intel: Remove the max_entries stuff that complicated bo reuse.
    
    I thought I was going to do all sorts of crazy experiments with it.  I never
    did, and it turned out the free-after-a-few-seconds plan is working out fine.

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index 84836a8..78297e0 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -69,15 +69,6 @@ typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
 
 struct drm_intel_gem_bo_bucket {
    drmMMListHead head;
-
-   /**
-    * Limit on the number of entries in this bucket.
-    *
-    * 0 means that this caching at this bucket size is disabled.
-    * -1 means that there is no limit to caching at this size.
-    */
-   int max_entries;
-   int num_entries;
    unsigned long size;
 };
 
@@ -105,6 +96,7 @@ typedef struct _drm_intel_bufmgr_gem {
     uint64_t gtt_size;
     int available_fences;
     int pci_device;
+    char bo_reuse;
 } drm_intel_bufmgr_gem;
 
 struct _drm_intel_bo_gem {
@@ -342,7 +334,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
     /* If we don't have caching at this size, don't actually round the
      * allocation up.
      */
-    if (bucket == NULL || bucket->max_entries == 0) {
+    if (bucket == NULL) {
 	bo_size = size;
 	if (bo_size < page_size)
 	    bo_size = page_size;
@@ -352,7 +344,7 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
 
     pthread_mutex_lock(&bufmgr_gem->lock);
     /* Get a buffer out of the cache if available */
-    if (bucket != NULL && bucket->num_entries > 0) {
+    if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
 	if (for_render) {
 	    /* Allocate new render-target BOs from the tail (MRU)
 	     * of the list, as it will likely be hot in the GPU cache
@@ -360,7 +352,6 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
 	     */
 	    bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.prev, head);
 	    DRMLISTDEL(&bo_gem->head);
-	    bucket->num_entries--;
 	    alloc_from_cache = 1;
 	} else {
 	    /* For non-render-target BOs (where we're probably going to map it
@@ -374,7 +365,6 @@ drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
 	    if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
 		alloc_from_cache = 1;
 		DRMLISTDEL(&bo_gem->head);
-		bucket->num_entries--;
 	    }
 	}
     }
@@ -553,7 +543,6 @@ drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 		break;
 
 	    DRMLISTDEL(&bo_gem->head);
-	    bucket->num_entries--;
 
 	    drm_intel_gem_bo_free(&bo_gem->bo);
 	}
@@ -587,11 +576,7 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
 	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
 	/* Put the buffer into our internal cache for reuse if we can. */
 	tiling_mode = I915_TILING_NONE;
-	if (bo_gem->reusable &&
-	    bucket != NULL &&
-	    (bucket->max_entries == -1 ||
-	     (bucket->max_entries > 0 &&
-	      bucket->num_entries < bucket->max_entries)) &&
+	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
 	    drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0)
 	{
 	    struct timespec time;
@@ -606,7 +591,6 @@ drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
 	    bo_gem->reloc_count = 0;
 
 	    DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
-	    bucket->num_entries++;
 
 	    drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
 	} else {
@@ -931,7 +915,6 @@ drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
 	while (!DRMLISTEMPTY(&bucket->head)) {
 	    bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
 	    DRMLISTDEL(&bo_gem->head);
-	    bucket->num_entries--;
 
 	    drm_intel_gem_bo_free(&bo_gem->bo);
 	}
@@ -1225,11 +1208,8 @@ void
 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
 {
     drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-    int i;
 
-    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
-	bufmgr_gem->cache_bucket[i].max_entries = -1;
-    }
+    bufmgr_gem->bo_reuse = 1;
 }
 
 /**
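
With this change, reuse is a single opt-in flag on the bufmgr rather than per-bucket
max_entries limits, and callers still turn it on the same way as before.  A minimal usage
sketch in C (setup_bufmgr is a hypothetical helper, 4096 is only an illustrative batch
size, and the DRM fd is assumed to be open already):

    #include "intel_bufmgr.h"

    static drm_intel_bufmgr *setup_bufmgr(int fd)
    {
        /* Batch size hint passed at init time; 4096 is only illustrative. */
        drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
        if (bufmgr == NULL)
            return NULL;

        /* Sets the new bo_reuse flag: freed BOs go back on the bucket lists
         * and can be handed out again by drm_intel_bo_alloc(). */
        drm_intel_bufmgr_gem_enable_reuse(bufmgr);
        return bufmgr;
    }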

commit 456a358b9ade5c90ff86b2322a79648c69cddcdc
Author: Eric Anholt <eric@anholt.net>
Date:   Sun Sep 6 23:02:21 2009 -0700

    intel: Remove the old swrast flag for reducing cache flushing.
    
    It hasn't been doing anything effective since
    52e5d24fae4af6f2f4a5304a516c8c5ab347a11b, and we pretty much don't bo_map
    pinned buffers any more anyway.

diff --git a/libdrm/intel/intel_bufmgr_gem.c b/libdrm/intel/intel_bufmgr_gem.c
index baa0ee6..84836a8 100644
--- a/libdrm/intel/intel_bufmgr_gem.c
+++ b/libdrm/intel/intel_bufmgr_gem.c
@@ -127,13 +127,6 @@ struct _drm_intel_bo_gem {
     int validate_index;
 
     /**
-     * Boolean whether we've started swrast
-     * Set when the buffer has been mapped
-     * Cleared when the buffer is unmapped
-     */
-    int swrast;
-
-    /**
      * Current tiling mode
      */
     uint32_t tiling_mode;
@@ -663,30 +656,26 @@ drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 	    return ret;
 	}
 	bo_gem->mem_virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
-	bo_gem->swrast = 0;
     }
     DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
 	bo_gem->mem_virtual);
     bo->virtual = bo_gem->mem_virtual;
 
-    if (bo_gem->global_name != 0 || !bo_gem->swrast) {
-	set_domain.handle = bo_gem->gem_handle;
-	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-	if (write_enable)
-	    set_domain.write_domain = I915_GEM_DOMAIN_CPU;
-	else
-	    set_domain.write_domain = 0;
-	do {
-	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
-			&set_domain);
-	} while (ret == -1 && errno == EINTR);
-	if (ret != 0) {
-	    fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
-		     __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
-	    pthread_mutex_unlock(&bufmgr_gem->lock);
-	    return ret;
-	}
-	bo_gem->swrast = 1;
+    set_domain.handle = bo_gem->gem_handle;
+    set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+    if (write_enable)
+	set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+    else
+	set_domain.write_domain = 0;
+    do {
+	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+		    &set_domain);
+    } while (ret == -1 && errno == EINTR);
+    if (ret != 0) {
+	fprintf (stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
+		 __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
+	pthread_mutex_unlock(&bufmgr_gem->lock);
+	return ret;
     }
 
     pthread_mutex_unlock(&bufmgr_gem->lock);
@@ -797,14 +786,16 @@ drm_intel_gem_bo_unmap(drm_intel_bo *bo)
     assert(bo_gem->mem_virtual != NULL);
 
     pthread_mutex_lock(&bufmgr_gem->lock);
-    if (bo_gem->swrast) {
-	sw_finish.handle = bo_gem->gem_handle;
-	do {
-	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
-			&sw_finish);
-	} while (ret == -1 && errno == EINTR);
-	bo_gem->swrast = 0;
-    }
+
+    /* Cause a flush to happen if the buffer's pinned for scanout, so the
+     * results show up in a timely manner.
+     */
+    sw_finish.handle = bo_gem->gem_handle;
+    do {
+	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+		    &sw_finish);
+    } while (ret == -1 && errno == EINTR);
+
     bo->virtual = NULL;
     pthread_mutex_unlock(&bufmgr_gem->lock);
     return 0;
@@ -1100,9 +1091,6 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
 	drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
 	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
 
-	/* Need to call swrast on next bo_map */
-	bo_gem->swrast = 0;
-
 	/* Disconnect the buffer from the validate list */
 	bo_gem->validate_index = -1;
 	drm_intel_gem_bo_unreference_locked(bo);
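
Both the set_domain path in bo_map and the sw_finish call in bo_unmap wrap the ioctl in a
restart-on-EINTR loop.  A generic sketch of that pattern (not libdrm code, just an
illustration of what the do/while above is doing; ioctl_restart is a hypothetical helper):

    #include <errno.h>
    #include <sys/ioctl.h>

    /* Retry an ioctl that may be interrupted by a signal, mirroring the
     * do { ... } while (ret == -1 && errno == EINTR) loops above. */
    static int ioctl_restart(int fd, unsigned long request, void *arg)
    {
        int ret;

        do {
            ret = ioctl(fd, request, arg);
        } while (ret == -1 && errno == EINTR);

        return ret;
    }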

commit 0a24654129847cf5cd61b5e8fb54c8f9c7441c55
Author: Keith Packard <keithp@keithp.com>
Date:   Thu Sep 17 17:28:08 2009 -0700

    drmModeFreeConnector: free encoders and properties
    
    These were leaking.
    
    Signed-off-by: Keith Packard <keithp@keithp.com>
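
The arrays freed below are the ones drmModeGetConnector() allocates for every probed
connector, so any code that walks the connector list leaked them on each call.  A rough
sketch of the typical probe loop where this matters (list_connectors is a hypothetical
helper; an open KMS-capable DRM fd is assumed):

    #include <stdio.h>
    #include <xf86drmMode.h>

    static void list_connectors(int fd)
    {
        drmModeResPtr res = drmModeGetResources(fd);
        int i;

        if (!res)
            return;

        for (i = 0; i < res->count_connectors; i++) {
            drmModeConnectorPtr conn = drmModeGetConnector(fd, res->connectors[i]);

            if (!conn)
                continue;
            printf("connector %u: %d modes, %d encoders, %d properties\n",
                   conn->connector_id, conn->count_modes,
                   conn->count_encoders, conn->count_props);
            /* Before this fix, this dropped ptr->modes but leaked the
             * encoders, props and prop_values arrays. */
            drmModeFreeConnector(conn);
        }
        drmModeFreeResources(res);
    }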

diff --git a/libdrm/xf86drmMode.c b/libdrm/xf86drmMode.c
index ea11207..88b4fe3 100644
--- a/libdrm/xf86drmMode.c
+++ b/libdrm/xf86drmMode.c
@@ -116,6 +116,9 @@ void drmModeFreeConnector(drmModeConnectorPtr ptr)
 	if (!ptr)
 		return;
 
+	drmFree(ptr->encoders);
+	drmFree(ptr->prop_values);
+	drmFree(ptr->props);
 	drmFree(ptr->modes);
 	drmFree(ptr);
 

commit cdd325b59a17a614b90fc2f8b388175e6d79e3cf
Author: Dave Airlie <airlied@linux.ie>
Date:   Tue Sep 15 07:29:02 2009 +1000

    radeon: fix 32/64 bit issue with sign extension
    
    Not sure what intptr_t was up to here.
    
    Reported and tested by: Kevin DeKorte
    Signed-off-by: Dave Airlie <airlied@redhat.com>

diff --git a/libdrm/radeon/radeon_cs_gem.c b/libdrm/radeon/radeon_cs_gem.c
index a0db53b..e42ec48 100644
--- a/libdrm/radeon/radeon_cs_gem.c
+++ b/libdrm/radeon/radeon_cs_gem.c
@@ -100,10 +100,10 @@ static struct radeon_cs *cs_gem_create(struct radeon_cs_manager *csm,
     }
     csg->chunks[0].chunk_id = RADEON_CHUNK_ID_IB;
     csg->chunks[0].length_dw = 0;
-    csg->chunks[0].chunk_data = (uint64_t)(intptr_t)csg->base.packets;
+    csg->chunks[0].chunk_data = (uint64_t)(uintptr_t)csg->base.packets;
     csg->chunks[1].chunk_id = RADEON_CHUNK_ID_RELOCS;
     csg->chunks[1].length_dw = 0;
-    csg->chunks[1].chunk_data = (uint64_t)(intptr_t)csg->relocs;
+    csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
     return (struct radeon_cs*)csg;
 }
 
@@ -184,7 +184,7 @@ static int cs_gem_write_reloc(struct radeon_cs *cs,
         }
         cs->relocs = csg->relocs = tmp;
         csg->nrelocs += 1;
-        csg->chunks[1].chunk_data = (uint64_t)(intptr_t)csg->relocs;
+        csg->chunks[1].chunk_data = (uint64_t)(uintptr_t)csg->relocs;
     }
     csg->relocs_bo[csg->base.crelocs] = bo;
     idx = (csg->base.crelocs++) * RELOC_SIZE;
@@ -269,11 +269,11 @@ static int cs_gem_emit(struct radeon_cs *cs)
 
     csg->chunks[0].length_dw = cs->cdw;
 
-    chunk_array[0] = (uint64_t)(intptr_t)&csg->chunks[0];
-    chunk_array[1] = (uint64_t)(intptr_t)&csg->chunks[1];
+    chunk_array[0] = (uint64_t)(uintptr_t)&csg->chunks[0];
+    chunk_array[1] = (uint64_t)(uintptr_t)&csg->chunks[1];
 
     csg->cs.num_chunks = 2;
-    csg->cs.chunks = (uint64_t)(intptr_t)chunk_array;
+    csg->cs.chunks = (uint64_t)(uintptr_t)chunk_array;
 
     r = drmCommandWriteRead(cs->csm->fd, DRM_RADEON_CS,
                             &csg->cs, sizeof(struct drm_radeon_cs));
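
The reason uintptr_t matters here: on a 32-bit build, a pointer with the high bit set
becomes a negative intptr_t, and widening that to uint64_t sign-extends it into the upper
32 bits, so the kernel sees a bogus chunk address; going through uintptr_t zero-extends
instead.  A small standalone illustration (uint32_t/int32_t stand in for the pointer types
on a 32-bit build):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Pretend this is a 32-bit userspace pointer above 2GB. */
        uint32_t addr32 = 0x80001000u;

        /* What the old casts did in effect: sign extension. */
        uint64_t wrong = (uint64_t)(int32_t)addr32;   /* 0xffffffff80001000 */

        /* What the fixed casts do: zero extension via an unsigned type. */
        uint64_t right = (uint64_t)addr32;            /* 0x0000000080001000 */

        printf("sign-extended: 0x%016llx\n", (unsigned long long)wrong);
        printf("zero-extended: 0x%016llx\n", (unsigned long long)right);
        return 0;
    }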

commit 67e4172394a88d4922fb8d9c7c3d96ce7e02c5a6
Author: Zhenyu Wang <zhenyuw@linux.intel.com>
Date:   Mon Sep 7 16:17:04 2009 +0800

    libdrm_intel: include B43 chipset check
    
    Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>

diff --git a/libdrm/intel/intel_chipset.h b/libdrm/intel/intel_chipset.h
index 26bc585..688476a 100644
--- a/libdrm/intel/intel_chipset.h
+++ b/libdrm/intel/intel_chipset.h
@@ -50,6 +50,7 @@
                        (dev)->pci_device == 0x2E12 || \
                        (dev)->pci_device == 0x2E22 || \
                        (dev)->pci_device == 0x2E32 || \
+                       (dev)->pci_device == 0x2E42 || \
                        (dev)->pci_device == 0x0042 || \
                        (dev)->pci_device == 0x0046)
 
@@ -59,7 +60,9 @@
 
 #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
                      (dev)->pci_device == 0x2E12 || \
-                     (dev)->pci_device == 0x2E22)
+                     (dev)->pci_device == 0x2E22 || \
+                     (dev)->pci_device == 0x2E32 || \
+                     (dev)->pci_device == 0x2E42)
 
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||  \
                         (dev)->pci_device == 0x29B2 ||  \

commit 121b9648f846d900e67818869974ee82046e9b25
Author: Zhenyu Wang <zhenyuw@linux.intel.com>
Date:   Fri Sep 4 09:24:23 2009 +0800

    libdrm_intel: add new pci ids
    
    New IDs for G41, Clarkdale and Arrandale.
    Also make sure we don't need to count fences on the new chips.
    
    Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>

diff --git a/libdrm/intel/intel_chipset.h b/libdrm/intel/intel_chipset.h
index 0b3af02..26bc585 100644
--- a/libdrm/intel/intel_chipset.h
+++ b/libdrm/intel/intel_chipset.h
@@ -48,7 +48,10 @@
                        (dev)->pci_device == 0x2A42 || \
                        (dev)->pci_device == 0x2E02 || \
                        (dev)->pci_device == 0x2E12 || \
-                       (dev)->pci_device == 0x2E22)
+                       (dev)->pci_device == 0x2E22 || \
+                       (dev)->pci_device == 0x2E32 || \
+                       (dev)->pci_device == 0x0042 || \
+                       (dev)->pci_device == 0x0046)
 
 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02)
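
These ID macros gate chipset-specific behaviour on the PCI device ID probed at bufmgr init
time (the pci_device field seen earlier in drm_intel_bufmgr_gem).  A self-contained sketch
of such a check, using the IS_G4X list as it reads after these two commits (fake_dev is a
hypothetical stand-in for the structure carrying pci_device):

    #include <stdio.h>

    struct fake_dev {
        int pci_device;
    };

    /* 0x2E32 (G41) and 0x2E42 (B43) are now recognised alongside the
     * original G4X IDs. */
    #define IS_G4X(dev) ((dev)->pci_device == 0x2E02 || \
                         (dev)->pci_device == 0x2E12 || \
                         (dev)->pci_device == 0x2E22 || \
                         (dev)->pci_device == 0x2E32 || \
                         (dev)->pci_device == 0x2E42)

    int main(void)
    {
        struct fake_dev b43 = { .pci_device = 0x2E42 };

        printf("0x%04x is G4X-class: %s\n", b43.pci_device,
               IS_G4X(&b43) ? "yes" : "no");
        return 0;
    }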
 

