author    Maarten Lankhorst <maarten.lankhorst@canonical.com>    2013-01-15 14:57:05 +0100
committer Maarten Lankhorst <maarten.lankhorst@canonical.com>    2013-01-15 14:57:05 +0100
commit    5e45d7dfd74100d622f9cdc70bfd1f9fae1671de (patch)
tree      b12de2542f55d332a73fcd7d863bd2e45fd7d4ef /drivers/gpu/drm/ttm
parent    7a1863084c9d90ce4b67d645bf9b0f1612e68f62 (diff)
drm/ttm: add ttm_bo_reserve_slowpath
Instead of dropping everything, waiting for the bo to be unreserved and
trying again, a better strategy is to do a blocking wait. This maps much
better to a mutex_lock-like call.

Signed-off-by: Maarten Lankhorst <maarten.lankhorst@canonical.com>
Reviewed-by: Jerome Glisse <jglisse@redhat.com>
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--    drivers/gpu/drm/ttm/ttm_bo.c    47
1 file changed, 47 insertions(+), 0 deletions(-)
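The new call is meant to slot into multi-buffer reservation loops such as
ttm_eu_reserve_buffers(): try the normal ttm_bo_reserve() on every buffer,
and when it reports that an older-sequence thread holds one of them, back
everything off and block on that one contended buffer instead of
busy-retrying the whole list. Below is a minimal sketch of that caller
pattern, assuming the then-current ttm_bo_reserve() signature and its
-EAGAIN backoff convention; reserve_list_slowpath(), backoff_all() and the
entry->reserved bookkeeping are illustrative stand-ins, not TTM API.

static int reserve_list_slowpath(struct list_head *list, uint32_t val_seq)
{
        struct ttm_validate_buffer *entry;
        int ret;

retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;

                /* Already taken via the slowpath on a previous pass. */
                if (entry->reserved)
                        continue;

                ret = ttm_bo_reserve(bo, true, false, true, val_seq);
                if (ret == -EAGAIN) {
                        /*
                         * A thread with an older sequence holds bo and
                         * will not back off for us.  Release everything
                         * we hold (clearing the reserved flags), then do
                         * a blocking, mutex_lock-like wait on the one
                         * contended buffer before starting over.
                         */
                        backoff_all(list);      /* hypothetical helper */
                        ret = ttm_bo_reserve_slowpath(bo, true, val_seq);
                        if (unlikely(ret != 0))
                                return ret;
                        entry->reserved = true;
                        goto retry;
                }
                if (unlikely(ret != 0)) {
                        backoff_all(list);
                        return ret;
                }
                entry->reserved = true;
        }
        return 0;
}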
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index e8e4814b129..4dd6f9e77a7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -310,6 +310,53 @@ int ttm_bo_reserve(struct ttm_buffer_object *bo,
         return ret;
 }
+int ttm_bo_reserve_slowpath_nolru(struct ttm_buffer_object *bo,
+                                  bool interruptible, uint32_t sequence)
+{
+        bool wake_up = false;
+        int ret;
+
+        while (unlikely(atomic_xchg(&bo->reserved, 1) != 0)) {
+                WARN_ON(bo->seq_valid && sequence == bo->val_seq);
+
+                ret = ttm_bo_wait_unreserved(bo, interruptible);
+
+                if (unlikely(ret))
+                        return ret;
+        }
+
+        if ((bo->val_seq - sequence < (1 << 31)) || !bo->seq_valid)
+                wake_up = true;
+
+        /*
+         * Wake up waiters that may need to recheck for deadlock,
+         * if we decreased the sequence number.
+         */
+        bo->val_seq = sequence;
+        bo->seq_valid = true;
+        if (wake_up)
+                wake_up_all(&bo->event_queue);
+
+        return 0;
+}
+
+int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
+                            bool interruptible, uint32_t sequence)
+{
+        struct ttm_bo_global *glob = bo->glob;
+        int put_count, ret;
+
+        ret = ttm_bo_reserve_slowpath_nolru(bo, interruptible, sequence);
+        if (likely(!ret)) {
+                spin_lock(&glob->lru_lock);
+                put_count = ttm_bo_del_from_lru(bo);
+                spin_unlock(&glob->lru_lock);
+                ttm_bo_list_ref_sub(bo, put_count, true);
+        }
+        return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve_slowpath);
+
 void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
 {
         ttm_bo_add_to_lru(bo);
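
A note on the wake_up test in ttm_bo_reserve_slowpath_nolru(): it relies on
unsigned 32-bit wraparound. bo->val_seq - sequence < (1 << 31) is the usual
serial-number comparison meaning "sequence is not newer than val_seq", i.e.
the stored sequence is being lowered (or kept equal), which is exactly the
case where blocked waiters must recheck their deadlock condition. A small
user-space illustration of the same comparison; the seq_not_after() name is
ours, not TTM's:

#include <assert.h>
#include <stdint.h>

/* True when a is not newer than b under 32-bit wraparound; the patch
 * computes this as b - a < (1 << 31) with a = sequence, b = bo->val_seq. */
static int seq_not_after(uint32_t a, uint32_t b)
{
        return b - a < ((uint32_t)1 << 31);
}

int main(void)
{
        assert(seq_not_after(5, 5));            /* equal: wake up */
        assert(seq_not_after(5, 10));           /* plainly older */
        assert(seq_not_after(0xfffffff0u, 4));  /* older across the wrap */
        assert(!seq_not_after(4, 0xfffffff0u)); /* newer: no wakeup */
        return 0;
}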