Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_gtt.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c  96
1 file changed, 59 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index a135c61f4119..9fd25a435536 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -96,11 +96,10 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
- }
- for (i = 0; i < ppgtt->num_pd_entries; i++) {
- dma_addr_t pt_addr;
- if (dev_priv->mm.gtt->needs_dmar) {
+ for (i = 0; i < ppgtt->num_pd_entries; i++) {
+ dma_addr_t pt_addr;
+
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
@@ -112,8 +111,7 @@ int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
}
ppgtt->pt_dma_addr[i] = pt_addr;
- } else
- pt_addr = page_to_phys(ppgtt->pt_pages[i]);
+ }
}
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
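
The hunk above drops the old !needs_dmar fallback: page-table pages are now DMA-mapped through pci_map_page() unconditionally. A minimal standalone sketch of that mapping loop with the error unwinding made explicit; the field names mirror struct i915_hw_ppgtt, but the helper itself and its -EIO return are illustrative assumptions, not the patch's exact error path:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/pci.h>

/* Map each page-table page for DMA; on failure, unmap everything
 * mapped so far before reporting the error. */
static int map_pt_pages(struct pci_dev *pdev, struct page **pt_pages,
			dma_addr_t *pt_dma_addr, int num_pd_entries)
{
	int i;

	for (i = 0; i < num_pd_entries; i++) {
		dma_addr_t pt_addr = pci_map_page(pdev, pt_pages[i], 0,
						  PAGE_SIZE,
						  PCI_DMA_BIDIRECTIONAL);

		if (pci_dma_mapping_error(pdev, pt_addr)) {
			while (--i >= 0)
				pci_unmap_page(pdev, pt_dma_addr[i],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
			return -EIO;
		}
		pt_dma_addr[i] = pt_addr;
	}
	return 0;
}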
@@ -269,7 +267,13 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
BUG();
}
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ i915_ppgtt_insert_sg_entries(ppgtt,
+ obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ pte_flags);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
i915_ppgtt_insert_sg_entries(ppgtt,
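
The new first branch covers objects whose backing storage arrives as a prebuilt scatter/gather table (for example a dma-buf import), ahead of the existing DMAR and direct-page paths. A hedged sketch of the resulting three-way dispatch, written as if it lived in this file; the insert-helper signatures follow the call sites visible in this patch and are assumptions where the hunk is truncated:

/* Illustrative dispatch order for writing PPGTT PTEs. */
static void ppgtt_bind_sketch(struct i915_hw_ppgtt *ppgtt,
			      struct drm_i915_gem_object *obj,
			      struct drm_i915_private *dev_priv,
			      uint32_t pte_flags)
{
	unsigned first_entry = obj->gtt_space->start >> PAGE_SHIFT;

	if (obj->sg_table)			/* prebuilt sg table */
		i915_ppgtt_insert_sg_entries(ppgtt, obj->sg_table->sgl,
					     obj->sg_table->nents,
					     first_entry, pte_flags);
	else if (dev_priv->mm.gtt->needs_dmar)	/* DMA-remapped list */
		i915_ppgtt_insert_sg_entries(ppgtt, obj->sg_list,
					     obj->num_sg,
					     first_entry, pte_flags);
	else					/* raw struct pages */
		i915_ppgtt_insert_pages(ppgtt, first_entry,
					obj->base.size >> PAGE_SHIFT,
					obj->pages, pte_flags);
}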
@@ -319,7 +323,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
dev_priv->mm.interruptible = false;
- if (i915_gpu_idle(dev_priv->dev, false)) {
+ if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
@@ -346,48 +350,39 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
i915_gem_clflush_object(obj);
- i915_gem_gtt_rebind_object(obj, obj->cache_level);
+ i915_gem_gtt_bind_object(obj, obj->cache_level);
}
intel_gtt_chipset_flush();
}
-int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- unsigned int agp_type = cache_level_to_agp_type(dev, obj->cache_level);
- int ret;
-
- if (dev_priv->mm.gtt->needs_dmar) {
- ret = intel_gtt_map_memory(obj->pages,
- obj->base.size >> PAGE_SHIFT,
- &obj->sg_list,
- &obj->num_sg);
- if (ret != 0)
- return ret;
-
- intel_gtt_insert_sg_entries(obj->sg_list,
- obj->num_sg,
- obj->gtt_space->start >> PAGE_SHIFT,
- agp_type);
- } else
- intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT,
- obj->pages,
- agp_type);
- return 0;
+ if (dev_priv->mm.gtt->needs_dmar)
+ return intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ else
+ return 0;
}
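
With the old bind helper split in two, DMA mapping (prepare) is now decoupled from PTE writing (bind). A hypothetical caller pairing the two, using the signatures introduced above; the wrapper name is illustrative, not part of the patch:

/* Illustrative only: DMA-map the object's pages once, then write its
 * global-GTT PTEs at the requested cache level. */
static int bind_to_global_gtt(struct drm_i915_gem_object *obj,
			      enum i915_cache_level cache_level)
{
	int ret;

	ret = i915_gem_gtt_prepare_object(obj);
	if (ret)
		return ret;

	i915_gem_gtt_bind_object(obj, cache_level);
	return 0;
}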
-void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
- enum i915_cache_level cache_level)
+void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
+ enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned int agp_type = cache_level_to_agp_type(dev, cache_level);
- if (dev_priv->mm.gtt->needs_dmar) {
+ if (obj->sg_table) {
+ intel_gtt_insert_sg_entries(obj->sg_table->sgl,
+ obj->sg_table->nents,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ agp_type);
+ } else if (dev_priv->mm.gtt->needs_dmar) {
BUG_ON(!obj->sg_list);
intel_gtt_insert_sg_entries(obj->sg_list,
@@ -399,19 +394,26 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT,
obj->pages,
agp_type);
+
+ obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+
+ obj->has_global_gtt_mapping = 0;
+}
+
+void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj)
+{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
bool interruptible;
interruptible = do_idling(dev_priv);
- intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
- obj->base.size >> PAGE_SHIFT);
-
if (obj->sg_list) {
intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
obj->sg_list = NULL;
@@ -419,3 +421,23 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
undo_idling(dev_priv, interruptible);
}
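
Teardown mirrors that split: unbind clears the object's PTEs, while finish undoes the scatter/gather DMA mapping (idling the GPU first on chipsets that need it). A sketch of the expected caller ordering; the wrapper itself is illustrative:

/* Illustrative only: drop the global-GTT mapping, then release the
 * DMA resources behind it. */
static void unbind_from_global_gtt(struct drm_i915_gem_object *obj)
{
	i915_gem_gtt_unbind_object(obj);	/* clear the GTT range */
	i915_gem_gtt_finish_object(obj);	/* unmap obj->sg_list */
}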
+
+void i915_gem_init_global_gtt(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+
+ /* Subtract the guard page ... */
+ drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
+
+ dev_priv->mm.gtt_start = start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
+ dev_priv->mm.gtt_end = end;
+ dev_priv->mm.gtt_total = end - start;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+
+ /* ... but ensure that we clear the entire range. */
+ intel_gtt_clear_range(start / PAGE_SIZE, (end - start) / PAGE_SIZE);
+}
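
The asymmetry between the two calls above is deliberate: drm_mm_init() is handed a range one page short, so no object can ever be bound flush against the end of the GTT (guarding against hardware prefetch past the last valid PTE), while intel_gtt_clear_range() still scrubs every entry, guard page included. A worked example with assumed values: for start = 0 and end = 256 MiB with 4 KiB pages, the allocator manages 268431360 bytes (256 MiB minus one page), yet all 65536 PTEs are cleared.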