Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_sgdma.c  179
1 file changed, 53 insertions(+), 126 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index c8a463b76c89..47f245edf538 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -8,91 +8,30 @@
#define NV_CTXDMA_PAGE_MASK (NV_CTXDMA_PAGE_SIZE - 1)
struct nouveau_sgdma_be {
- struct ttm_backend backend;
+ /* this has to be the first field so populate/unpopulate in
+ * nouveau_bo.c work properly, otherwise they would have to move here
+ */
+ struct ttm_dma_tt ttm;
struct drm_device *dev;
-
- dma_addr_t *pages;
- unsigned nr_pages;
- bool unmap_pages;
-
u64 offset;
- bool bound;
};
-static int
-nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
- struct page **pages, struct page *dummy_read_page,
- dma_addr_t *dma_addrs)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
- int i;
-
- NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
-
- nvbe->pages = dma_addrs;
- nvbe->nr_pages = num_pages;
- nvbe->unmap_pages = true;
-
- /* this code path isn't called and is incorrect anyways */
- if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
- nvbe->unmap_pages = false;
- return 0;
- }
-
- for (i = 0; i < num_pages; i++) {
- nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
- nvbe->nr_pages = --i;
- be->func->clear(be);
- return -EFAULT;
- }
- }
-
- return 0;
-}
-
static void
-nouveau_sgdma_clear(struct ttm_backend *be)
+nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct drm_device *dev = nvbe->dev;
-
- if (nvbe->bound)
- be->func->unbind(be);
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
- if (nvbe->unmap_pages) {
- while (nvbe->nr_pages--) {
- pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- }
- nvbe->unmap_pages = false;
- }
-
- nvbe->pages = NULL;
-}
-
-static void
-nouveau_sgdma_destroy(struct ttm_backend *be)
-{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-
- if (be) {
+ if (ttm) {
NV_DEBUG(nvbe->dev, "\n");
-
- if (nvbe) {
- if (nvbe->pages)
- be->func->clear(be);
- kfree(nvbe);
- }
+ ttm_dma_tt_fini(&nvbe->ttm);
+ kfree(nvbe);
}
}
static int
-nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
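The comment added at the top of this hunk is load-bearing: because struct ttm_dma_tt is the first member of struct nouveau_sgdma_be, the struct ttm_tt * that TTM hands to the bind/unbind/destroy hooks has the same address as the containing backend, which is what makes the casts used throughout this file well defined. A minimal standalone sketch of that layout assumption (the stand-in type definitions and the to_nvbe() helper name are illustrative only, not part of the patch):

    /* Stand-in definitions; the real ones live in the TTM headers. */
    struct ttm_tt { unsigned long num_pages; };
    struct ttm_dma_tt { struct ttm_tt ttm; unsigned long long *dma_address; };

    struct nouveau_sgdma_be {
        struct ttm_dma_tt ttm;      /* must remain the first member */
        void *dev;
        unsigned long long offset;
    };

    /* With 'ttm' first, &nvbe->ttm.ttm == (struct ttm_tt *)nvbe, so
     * casting the embedded ttm_tt back to the backend is valid. */
    static inline struct nouveau_sgdma_be *to_nvbe(struct ttm_tt *ttm)
    {
        return (struct nouveau_sgdma_be *)ttm;
    }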
@@ -102,8 +41,8 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
nvbe->offset = mem->start << PAGE_SHIFT;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
- dma_addr_t dma_offset = nvbe->pages[i];
+ for (i = 0; i < ttm->num_pages; i++) {
+ dma_addr_t dma_offset = nvbe->ttm.dma_address[i];
uint32_t offset_l = lower_32_bits(dma_offset);
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++) {
@@ -112,14 +51,13 @@ nv04_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
}
- nvbe->bound = true;
return 0;
}
static int
-nv04_sgdma_unbind(struct ttm_backend *be)
+nv04_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
@@ -127,22 +65,19 @@ nv04_sgdma_unbind(struct ttm_backend *be)
NV_DEBUG(dev, "\n");
- if (!nvbe->bound)
+ if (ttm->state != tt_bound)
return 0;
pte = (nvbe->offset >> NV_CTXDMA_PAGE_SHIFT) + 2;
- for (i = 0; i < nvbe->nr_pages; i++) {
+ for (i = 0; i < ttm->num_pages; i++) {
for (j = 0; j < PAGE_SIZE / NV_CTXDMA_PAGE_SIZE; j++, pte++)
nv_wo32(gpuobj, (pte * 4) + 0, 0x00000000);
}
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv04_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
@@ -161,14 +96,14 @@ nv41_sgdma_flush(struct nouveau_sgdma_be *nvbe)
}
static int
-nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv41_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -178,18 +113,17 @@ nv41_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = true;
return 0;
}
static int
-nv41_sgdma_unbind(struct ttm_backend *be)
+nv41_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
while (cnt--) {
nv_wo32(pgt, pte, 0x00000000);
@@ -197,24 +131,22 @@ nv41_sgdma_unbind(struct ttm_backend *be)
}
nv41_sgdma_flush(nvbe);
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv41_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv41_sgdma_bind,
.unbind = nv41_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static void
-nv44_sgdma_flush(struct nouveau_sgdma_be *nvbe)
+nv44_sgdma_flush(struct ttm_tt *ttm)
{
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_device *dev = nvbe->dev;
- nv_wr32(dev, 0x100814, (nvbe->nr_pages - 1) << 12);
+ nv_wr32(dev, 0x100814, (ttm->num_pages - 1) << 12);
nv_wr32(dev, 0x100808, nvbe->offset | 0x20);
if (!nv_wait(dev, 0x100808, 0x00000001, 0x00000001))
NV_ERROR(dev, "gart flush timeout: 0x%08x\n",
@@ -273,14 +205,14 @@ nv44_sgdma_fill(struct nouveau_gpuobj *pgt, dma_addr_t *list, u32 base, u32 cnt)
}
static int
-nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv44_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
- dma_addr_t *list = nvbe->pages;
+ dma_addr_t *list = nvbe->ttm.dma_address;
u32 pte = mem->start << 2, tmp[4];
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
int i;
nvbe->offset = mem->start << PAGE_SHIFT;
@@ -308,19 +240,18 @@ nv44_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
if (cnt)
nv44_sgdma_fill(pgt, list, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = true;
+ nv44_sgdma_flush(ttm);
return 0;
}
static int
-nv44_sgdma_unbind(struct ttm_backend *be)
+nv44_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct drm_nouveau_private *dev_priv = nvbe->dev->dev_private;
struct nouveau_gpuobj *pgt = dev_priv->gart_info.sg_ctxdma;
u32 pte = (nvbe->offset >> 12) << 2;
- u32 cnt = nvbe->nr_pages;
+ u32 cnt = ttm->num_pages;
if (pte & 0x0000000c) {
u32 max = 4 - ((pte >> 2) & 0x3);
@@ -342,55 +273,47 @@ nv44_sgdma_unbind(struct ttm_backend *be)
if (cnt)
nv44_sgdma_fill(pgt, NULL, pte, cnt);
- nv44_sgdma_flush(nvbe);
- nvbe->bound = false;
+ nv44_sgdma_flush(ttm);
return 0;
}
static struct ttm_backend_func nv44_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv44_sgdma_bind,
.unbind = nv44_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
static int
-nv50_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
+ struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
+
/* noop: bound in move_notify() */
- node->pages = nvbe->pages;
- nvbe->pages = (dma_addr_t *)node;
- nvbe->bound = true;
+ node->pages = nvbe->ttm.dma_address;
return 0;
}
static int
-nv50_sgdma_unbind(struct ttm_backend *be)
+nv50_sgdma_unbind(struct ttm_tt *ttm)
{
- struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
- struct nouveau_mem *node = (struct nouveau_mem *)nvbe->pages;
/* noop: unbound in move_notify() */
- nvbe->pages = node->pages;
- node->pages = NULL;
- nvbe->bound = false;
return 0;
}
static struct ttm_backend_func nv50_sgdma_backend = {
- .populate = nouveau_sgdma_populate,
- .clear = nouveau_sgdma_clear,
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
-struct ttm_backend *
-nouveau_sgdma_init_ttm(struct drm_device *dev)
+struct ttm_tt *
+nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
+ unsigned long size, uint32_t page_flags,
+ struct page *dummy_read_page)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct drm_device *dev = dev_priv->dev;
struct nouveau_sgdma_be *nvbe;
nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);
@@ -398,9 +321,13 @@ nouveau_sgdma_init_ttm(struct drm_device *dev)
return NULL;
nvbe->dev = dev;
+ nvbe->ttm.ttm.func = dev_priv->gart_info.func;
- nvbe->backend.func = dev_priv->gart_info.func;
- return &nvbe->backend;
+ if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(nvbe);
+ return NULL;
+ }
+ return &nvbe->ttm.ttm;
}
int
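For context, nouveau_sgdma_create_ttm() matches the signature of TTM's ttm_tt_create hook, so it is presumably returned from the driver's ttm_tt_create callback in nouveau_bo.c, likely through a small wrapper that also handles the AGP case. A hedged sketch of that wiring; the wrapper name and the driver-struct contents are assumptions, not taken from this patch:

    /* Sketch of how the constructor is expected to be reached from TTM;
     * names other than nouveau_sgdma_create_ttm are assumptions. */
    static struct ttm_tt *
    nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
                          uint32_t page_flags, struct page *dummy_read_page)
    {
        /* An AGP vs. paged-GART dispatch would sit here; the SGDMA
         * path simply defers to the function added above. */
        return nouveau_sgdma_create_ttm(bdev, size, page_flags,
                                        dummy_read_page);
    }

    static struct ttm_bo_driver nouveau_bo_driver = {
        .ttm_tt_create = nouveau_ttm_tt_create,
        /* remaining hooks omitted */
    };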