Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c | 68
1 file changed, 38 insertions(+), 30 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 6b9187d7f67..288bacac7e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -95,9 +95,9 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
unsigned i, j, pte;
- NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
+ NV_DEBUG(dev, "pg=0x%lx\n", mem->start);
- pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
+ pte = nouveau_sgdma_pte(nvbe->dev, mem->start << PAGE_SHIFT);
nvbe->pte_start = pte;
for (i = 0; i < nvbe->nr_pages; i++) {
dma_addr_t dma_offset = nvbe->pages[i];
@@ -105,11 +105,13 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
uint32_t offset_h = upper_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50)
- nv_wo32(dev, gpuobj, pte++, offset_l | 3);
- else {
- nv_wo32(dev, gpuobj, pte++, offset_l | 0x21);
- nv_wo32(dev, gpuobj, pte++, offset_h & 0xff);
+ if (dev_priv->card_type < NV_50) {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 3);
+ pte += 1;
+ } else {
+ nv_wo32(gpuobj, (pte * 4) + 0, offset_l | 0x21);
+ nv_wo32(gpuobj, (pte * 4) + 4, offset_h & 0xff);
+ pte += 2;
}
dma_offset += NV_CTXDMA_PAGE_SIZE;
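The bind hunk above is the crux of the interface change: nv_wo32() now takes a byte offset into the object rather than a 32-bit word index (and no longer needs the drm_device pointer), so every call site scales the PTE index by 4 and advances it explicitly. Below is a minimal standalone sketch of that arithmetic; the obj array and wo32() helper are hypothetical stand-ins, not the nouveau accessors.

#include <stdint.h>
#include <stdio.h>

static uint32_t obj[64];            /* pretend gpuobj backing store */

/* new-style accessor: the offset is in bytes, as with nv_wo32 after
 * this patch; the old accessor took a 32-bit word index instead */
static void wo32(uint32_t byte_offset, uint32_t val)
{
	obj[byte_offset / 4] = val;
}

int main(void)
{
	unsigned pte = 0;
	uint32_t offset_l = 0x1000, offset_h = 0;

	/* pre-NV50 layout: one 32-bit PTE per ctxdma page */
	wo32((pte * 4) + 0, offset_l | 3);
	pte += 1;

	/* NV50+ layout: two 32-bit words per PTE */
	wo32((pte * 4) + 0, offset_l | 0x21);
	wo32((pte * 4) + 4, offset_h & 0xff);
	pte += 2;

	printf("%08x %08x %08x\n", obj[0], obj[1], obj[2]);
	return 0;
}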
@@ -145,11 +147,13 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++) {
- if (dev_priv->card_type < NV_50)
- nv_wo32(dev, gpuobj, pte++, dma_offset | 3);
- else {
- nv_wo32(dev, gpuobj, pte++, dma_offset | 0x21);
- nv_wo32(dev, gpuobj, pte++, 0x00000000);
+ if (dev_priv->card_type < NV_50) {
+ nv_wo32(gpuobj, (pte * 4) + 0, dma_offset | 3);
+ pte += 1;
+ } else {
+ nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
+ nv_wo32(gpuobj, (pte * 4) + 4, 0x00000000);
+ pte += 2;
}
dma_offset += NV_CTXDMA_PAGE_SIZE;
@@ -230,7 +234,6 @@ nouveau_sgdma_init(struct drm_device *dev)
}
ret = nouveau_gpuobj_new(dev, NULL, obj_size, 16,
- NVOBJ_FLAG_ALLOW_NO_REFS |
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE, &gpuobj);
if (ret) {
@@ -239,9 +242,9 @@ nouveau_sgdma_init(struct drm_device *dev)
}
dev_priv->gart_info.sg_dummy_page =
- alloc_page(GFP_KERNEL|__GFP_DMA32);
+ alloc_page(GFP_KERNEL|__GFP_DMA32|__GFP_ZERO);
if (!dev_priv->gart_info.sg_dummy_page) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return -ENOMEM;
}
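Two idioms change in this hunk. The dummy page is now allocated with __GFP_ZERO, so the kernel clears it at allocation time, and the error paths drop the gpuobj with nouveau_gpuobj_ref(NULL, &gpuobj) rather than calling a destructor directly: passing NULL releases the held reference and clears the pointer, destroying the object when the last reference goes away. A toy refcounting sketch of that idiom, with hypothetical types (not the nouveau implementation):

#include <stdlib.h>

struct obj { int refcount; };

/* obj_ref(inc, &ptr): take a reference on inc (if any), drop the one
 * held through *ptr (freeing on last unref), and point *ptr at inc */
static void obj_ref(struct obj *inc, struct obj **ptr)
{
	if (inc)
		inc->refcount++;
	if (*ptr && --(*ptr)->refcount == 0)
		free(*ptr);
	*ptr = inc;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));
	struct obj *held = NULL;

	o->refcount = 1;      /* creation reference */
	obj_ref(o, &held);    /* share it: refcount == 2 */
	obj_ref(NULL, &held); /* drop the shared ref, held becomes NULL */
	obj_ref(NULL, &o);    /* last unref frees the object */
	return 0;
}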
@@ -250,29 +253,34 @@ nouveau_sgdma_init(struct drm_device *dev)
pci_map_page(pdev, dev_priv->gart_info.sg_dummy_page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(pdev, dev_priv->gart_info.sg_dummy_bus)) {
- nouveau_gpuobj_del(dev, &gpuobj);
+ nouveau_gpuobj_ref(NULL, &gpuobj);
return -EFAULT;
}
if (dev_priv->card_type < NV_50) {
+ /* special case, allocated from global instmem heap so
+ * cinst is invalid, we use it on all channels though so
+ * cinst needs to be valid, set it the same as pinst
+ */
+ gpuobj->cinst = gpuobj->pinst;
+
/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
* confirmed to work on c51. Perhaps means NV_DMA_TARGET_PCIE
* on those cards? */
- nv_wo32(dev, gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
- (1 << 12) /* PT present */ |
- (0 << 13) /* PT *not* linear */ |
- (NV_DMA_ACCESS_RW << 14) |
- (NV_DMA_TARGET_PCI << 16));
- nv_wo32(dev, gpuobj, 1, aper_size - 1);
+ nv_wo32(gpuobj, 0, NV_CLASS_DMA_IN_MEMORY |
+ (1 << 12) /* PT present */ |
+ (0 << 13) /* PT *not* linear */ |
+ (NV_DMA_ACCESS_RW << 14) |
+ (NV_DMA_TARGET_PCI << 16));
+ nv_wo32(gpuobj, 4, aper_size - 1);
for (i = 2; i < 2 + (aper_size >> 12); i++) {
- nv_wo32(dev, gpuobj, i,
- dev_priv->gart_info.sg_dummy_bus | 3);
+ nv_wo32(gpuobj, i * 4,
+ dev_priv->gart_info.sg_dummy_bus | 3);
}
} else {
for (i = 0; i < obj_size; i += 8) {
- nv_wo32(dev, gpuobj, (i+0)/4,
- dev_priv->gart_info.sg_dummy_bus | 0x21);
- nv_wo32(dev, gpuobj, (i+4)/4, 0);
+ nv_wo32(gpuobj, i + 0, 0x00000000);
+ nv_wo32(gpuobj, i + 4, 0x00000000);
}
}
dev_priv->engine.instmem.flush(dev);
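The pre-NV50 branch builds a NV_CLASS_DMA_IN_MEMORY object whose first two words describe the page table (present, non-linear, read/write, PCI target) and the aperture limit; with the byte-offset accessors the second word now lives at offset 4 rather than word index 1, and the PTE at index i lives at offset i * 4. Note also the cinst = pinst fixup: the object comes from the global instmem heap rather than a channel, so its channel instance would otherwise be invalid. A standalone sketch of the header assembly follows; the constant values are illustrative placeholders (the real ones live in the nouveau headers), only the bit positions are taken from the diff.

#include <stdint.h>
#include <stdio.h>

/* illustrative placeholder values, not the real nouveau constants */
#define NV_CLASS_DMA_IN_MEMORY 0x0000003d
#define NV_DMA_ACCESS_RW       0
#define NV_DMA_TARGET_PCI      3

int main(void)
{
	uint32_t aper_size = 64 << 20;  /* hypothetical 64MiB aperture */

	uint32_t word0 = NV_CLASS_DMA_IN_MEMORY |
			 (1 << 12) |                /* PT present */
			 (0 << 13) |                /* PT *not* linear */
			 (NV_DMA_ACCESS_RW << 14) |
			 (NV_DMA_TARGET_PCI << 16);
	uint32_t word1 = aper_size - 1;     /* limit, inclusive */

	printf("+0x0: %08x\n+0x4: %08x\n", word0, word1);
	return 0;
}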
@@ -298,7 +306,7 @@ nouveau_sgdma_takedown(struct drm_device *dev)
dev_priv->gart_info.sg_dummy_bus = 0;
}
- nouveau_gpuobj_del(dev, &dev_priv->gart_info.sg_ctxdma);
+ nouveau_gpuobj_ref(NULL, &dev_priv->gart_info.sg_ctxdma);
}
int
@@ -308,9 +316,9 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
int pte;
- pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
+ pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
if (dev_priv->card_type < NV_50) {
- *page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
+ *page = nv_ro32(gpuobj, (pte + 8)) & ~NV_CTXDMA_PAGE_MASK;
return 0;
}
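The final hunk adapts the reverse lookup to the byte-offset accessors: the PTE index (offset >> NV_CTXDMA_PAGE_SHIFT) is shifted left by 2 to become a byte offset, and the + 8 skips the two 32-bit header words built in nouveau_sgdma_init(). A standalone sketch of that arithmetic; the backing array and ro32() helper are hypothetical stand-ins.

#include <stdint.h>
#include <stdio.h>

#define NV_CTXDMA_PAGE_SHIFT 12
#define NV_CTXDMA_PAGE_MASK  0x00000fff

static uint32_t obj[1024];  /* pretend ctxdma: 2 header words + PTEs */

/* new-style read accessor: offset in bytes, like nv_ro32 */
static uint32_t ro32(uint32_t byte_offset)
{
	return obj[byte_offset / 4];
}

int main(void)
{
	uint32_t offset = 0x3000;   /* GART offset being translated */

	/* pretend the bind step filled in the matching PTE */
	obj[2 + (offset >> NV_CTXDMA_PAGE_SHIFT)] = 0x12345000 | 3;

	/* (offset >> shift) << 2 turns the PTE index into a byte offset;
	 * + 8 skips the two 32-bit header words of the DMA object */
	uint32_t pte = (offset >> NV_CTXDMA_PAGE_SHIFT) << 2;
	uint32_t page = ro32(pte + 8) & ~NV_CTXDMA_PAGE_MASK;

	printf("page=%08x\n", page);  /* 12345000 */
	return 0;
}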