Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_mem.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_mem.c	363
1 file changed, 313 insertions(+), 50 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c
index 9689d414768..a163c7c612e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_mem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_mem.c
@@ -35,6 +35,8 @@
#include "drm_sarea.h"
#include "nouveau_drv.h"
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
/*
* NV10-NV40 tiling helpers
*/
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
- struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+ struct nouveau_tile_reg *tile = &dev_priv->tile[i];
tile->addr = addr;
tile->size = size;
tile->used = !!pitch;
nouveau_fence_unref((void **)&tile->fence);
- if (!pfifo->cache_flush(dev))
- return;
-
pfifo->reassign(dev, false);
- pfifo->cache_flush(dev);
pfifo->cache_pull(dev, false);
nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
- struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
- int i;
+ struct nouveau_tile_reg *found = NULL;
+ unsigned long i, flags;
- spin_lock(&dev_priv->tile.lock);
+ spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
for (i = 0; i < pfb->num_tiles; i++) {
- if (tile[i].used)
+ struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+ if (tile->used)
/* Tile region in use. */
continue;
- if (tile[i].fence &&
- !nouveau_fence_signalled(tile[i].fence, NULL))
+ if (tile->fence &&
+ !nouveau_fence_signalled(tile->fence, NULL))
/* Pending tile region. */
continue;
- if (max(tile[i].addr, addr) <
- min(tile[i].addr + tile[i].size, addr + size))
+ if (max(tile->addr, addr) <
+ min(tile->addr + tile->size, addr + size))
/* Kill an intersecting tile region. */
nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
if (pitch && !found) {
/* Free tile region. */
nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
- found = &tile[i];
+ found = tile;
}
}
- spin_unlock(&dev_priv->tile.lock);
+ spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return found;
}
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
virt += (end - pte);
while (pte < end) {
- nv_wo32(dev, pgt, pte++, offset_l);
- nv_wo32(dev, pgt, pte++, offset_h);
+ nv_wo32(pgt, (pte * 4) + 0, offset_l);
+ nv_wo32(pgt, (pte * 4) + 4, offset_h);
+ pte += 2;
}
}
}
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
pages -= (end - pte);
virt += (end - pte) << 15;
- while (pte < end)
- nv_wo32(dev, pgt, pte++, 0);
+ while (pte < end) {
+ nv_wo32(pgt, (pte * 4), 0);
+ pte++;
+ }
}
dev_priv->engine.instmem.flush(dev);
@@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
* Cleanup everything
*/
void
-nouveau_mem_close(struct drm_device *dev)
+nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev)
nouveau_ttm_global_release(dev_priv);
+ if (dev_priv->fb_mtrr >= 0) {
+ drm_mtrr_del(dev_priv->fb_mtrr,
+ pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+ dev_priv->fb_mtrr = -1;
+ }
+}
+
+void
+nouveau_mem_gart_fini(struct drm_device *dev)
+{
+ nouveau_sgdma_takedown(dev);
+
if (drm_core_has_AGP(dev) && dev->agp) {
struct drm_agp_mem *entry, *tempe;
@@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev)
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
-
- if (dev_priv->fb_mtrr) {
- drm_mtrr_del(dev_priv->fb_mtrr,
- pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
- dev_priv->fb_mtrr = -1;
- }
}
static uint32_t
@@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
return 0;
}
-/* returns the amount of FB ram in bytes */
-int
+static void
+nv50_vram_preinit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int i, parts, colbits, rowbitsa, rowbitsb, banks;
+ u64 rowsize, predicted;
+ u32 r0, r4, rt, ru;
+
+ r0 = nv_rd32(dev, 0x100200);
+ r4 = nv_rd32(dev, 0x100204);
+ rt = nv_rd32(dev, 0x100250);
+ ru = nv_rd32(dev, 0x001540);
+ NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+ for (i = 0, parts = 0; i < 8; i++) {
+ if (ru & (0x00010000 << i))
+ parts++;
+ }
+
+ colbits = (r4 & 0x0000f000) >> 12;
+ rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+ rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+ banks = ((r4 & 0x01000000) ? 8 : 4);
+
+ rowsize = parts * banks * (1 << colbits) * 8;
+ predicted = rowsize << rowbitsa;
+ if (r0 & 0x00000004)
+ predicted += rowsize << rowbitsb;
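+
+ /* For example (hypothetical values, for illustration only): with
+ * parts=2, banks=4, colbits=9 and rowbitsa=12, rowsize is
+ * 2 * 4 * 512 * 8 = 32768 bytes and predicted = 32768 << 12 = 128 MiB;
+ * if r0 bit 2 is set and rowbitsb=12, predicted doubles to 256 MiB. */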
+
+ if (predicted != dev_priv->vram_size) {
+ NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+ (u32)(dev_priv->vram_size >> 20));
+ NV_WARN(dev, "we calculated %dMiB VRAM\n",
+ (u32)(predicted >> 20));
+ }
+
+ dev_priv->vram_rblock_size = rowsize >> 12;
+ if (rt & 1)
+ dev_priv->vram_rblock_size *= 3;
+
+ NV_DEBUG(dev, "rblock %lld bytes\n",
+ (u64)dev_priv->vram_rblock_size << 12);
+}
+
+static void
+nvaa_vram_preinit(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /* To our knowledge, there's no large scale reordering of pages
+ * that occurs on IGP chipsets.
+ */
+ dev_priv->vram_rblock_size = 1;
+}
+
+static int
nouveau_mem_detect(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev)
dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
dev_priv->vram_size &= 0xffffffff00ll;
- if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+
+ switch (dev_priv->chipset) {
+ case 0xaa:
+ case 0xac:
+ case 0xaf:
dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
dev_priv->vram_sys_base <<= 12;
+ nvaa_vram_preinit(dev);
+ break;
+ default:
+ nv50_vram_preinit(dev);
+ break;
}
} else {
dev_priv->vram_size = nv_rd32(dev, 0x10f20c) << 20;
@@ -345,6 +417,33 @@ nouveau_mem_detect(struct drm_device *dev)
return -ENOMEM;
}
+#if __OS_HAS_AGP
+static unsigned long
+get_agp_mode(struct drm_device *dev, unsigned long mode)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ /*
+ * FW seems to be broken on nv18; it makes the card lock up
+ * randomly.
+ */
+ if (dev_priv->chipset == 0x18)
+ mode &= ~PCI_AGP_COMMAND_FW;
+
+ /*
+ * Apply the AGP mode requested on the command line.
+ */
+ if (nouveau_agpmode > 0) {
+ bool agpv3 = mode & 0x8;
+ int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+ mode = (mode & ~0x7) | (rate & 0x7);
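+
+ /* For example (hypothetical values, for illustration only):
+ * nouveau_agpmode=8 on an AGPv3 bridge gives rate = 2, i.e. 8x in
+ * AGPv3's 4x-granularity rate encoding, while nouveau_agpmode=4 on
+ * an AGPv2 bridge keeps rate = 4 (4x). */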
+ }
+
+ return mode;
+}
+#endif
+
int
nouveau_mem_reset_agp(struct drm_device *dev)
{
@@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
/* First of all, disable fast writes, otherwise if it's
* already enabled in the AGP bridge and we disable the card's
* AGP controller we might be locking ourselves out of it. */
- if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+ if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
+ dev->agp->mode) & PCI_AGP_COMMAND_FW) {
struct drm_agp_info info;
struct drm_agp_mode mode;
@@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
if (ret)
return ret;
- mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+ mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
ret = drm_agp_enable(dev, mode);
if (ret)
return ret;
@@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
/* see agp.h for the AGPSTAT_* modes available */
- mode.mode = info.mode;
+ mode.mode = get_agp_mode(dev, info.mode);
ret = drm_agp_enable(dev, mode);
if (ret) {
NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
@@ -433,24 +533,27 @@ nouveau_mem_init_agp(struct drm_device *dev)
}
int
-nouveau_mem_init(struct drm_device *dev)
+nouveau_mem_vram_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
- int ret, dma_bits = 32;
-
- dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
- dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+ int ret, dma_bits;
if (dev_priv->card_type >= NV_50 &&
pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
dma_bits = 40;
+ else
+ dma_bits = 32;
ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
- if (ret) {
- NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+ if (ret)
return ret;
- }
+
+ ret = nouveau_mem_detect(dev);
+ if (ret)
+ return ret;
+
+ dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
ret = nouveau_ttm_global_init(dev_priv);
if (ret)
@@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- spin_lock_init(&dev_priv->tile.lock);
-
dev_priv->fb_available_size = dev_priv->vram_size;
dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +575,16 @@ nouveau_mem_init(struct drm_device *dev)
pci_resource_len(dev->pdev, 1);
dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
- /* remove reserved space at end of vram from available amount */
+ /* reserve space at end of VRAM for PRAMIN */
+ if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
+ dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
+ dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+ else
+ if (dev_priv->card_type >= NV_40)
+ dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+ else
+ dev_priv->ramin_rsvd_vram = (512 * 1024);
+
dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
dev_priv->fb_aper_free = dev_priv->fb_available_size;
@@ -495,9 +605,23 @@ nouveau_mem_init(struct drm_device *dev)
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
}
- /* GART */
+ dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+ pci_resource_len(dev->pdev, 1),
+ DRM_MTRR_WC);
+ return 0;
+}
+
+int
+nouveau_mem_gart_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+ int ret;
+
+ dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
#if !defined(__powerpc__) && !defined(__ia64__)
- if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
+ if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
ret = nouveau_mem_init_agp(dev);
if (ret)
NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +647,150 @@ nouveau_mem_init(struct drm_device *dev)
return ret;
}
- dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
- pci_resource_len(dev->pdev, 1),
- DRM_MTRR_WC);
-
return 0;
}
+void
+nouveau_mem_timing_init(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+ struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+ struct nvbios *bios = &dev_priv->vbios;
+ struct bit_entry P;
+ u8 tUNK_0, tUNK_1, tUNK_2;
+ u8 tRP; /* Byte 3 */
+ u8 tRAS; /* Byte 5 */
+ u8 tRFC; /* Byte 7 */
+ u8 tRC; /* Byte 9 */
+ u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+ u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+ u8 *mem = NULL, *entry;
+ int i, recordlen, entries;
+
+ if (bios->type == NVBIOS_BIT) {
+ if (bit_table(dev, 'P', &P))
+ return;
+
+ if (P.version == 1)
+ mem = ROMPTR(bios, P.data[4]);
+ else
+ if (P.version == 2)
+ mem = ROMPTR(bios, P.data[8]);
+ else {
+ NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+ }
+ } else {
+ NV_DEBUG(dev, "BMP version too old for memory\n");
+ return;
+ }
+
+ if (!mem) {
+ NV_DEBUG(dev, "memory timing table pointer invalid\n");
+ return;
+ }
+ if (mem[0] != 0x10) {
+ NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+ return;
+ }
+
+ /* validate record length */
+ entries = mem[2];
+ recordlen = mem[3];
+ if (recordlen < 15) {
+ NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+ return;
+ }
+
+ /* parse vbios entries into common format */
+ memtimings->timing =
+ kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+ if (!memtimings->timing)
+ return;
+
+ entry = mem + mem[1];
+ for (i = 0; i < entries; i++, entry += recordlen) {
+ struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+ if (entry[0] == 0)
+ continue;
+
+ tUNK_18 = 1;
+ tUNK_19 = 1;
+ tUNK_20 = 0;
+ tUNK_21 = 0;
+ /* Longer records are supersets of the shorter ones, so the
+ * cases below intentionally fall through. */
+ switch (MIN(recordlen, 21)) {
+ case 21:
+ tUNK_21 = entry[21];
+ case 20:
+ tUNK_20 = entry[20];
+ case 19:
+ tUNK_19 = entry[19];
+ case 18:
+ tUNK_18 = entry[18];
+ default:
+ tUNK_0 = entry[0];
+ tUNK_1 = entry[1];
+ tUNK_2 = entry[2];
+ tRP = entry[3];
+ tRAS = entry[5];
+ tRFC = entry[7];
+ tRC = entry[9];
+ tUNK_10 = entry[10];
+ tUNK_11 = entry[11];
+ tUNK_12 = entry[12];
+ tUNK_13 = entry[13];
+ tUNK_14 = entry[14];
+ break;
+ }
+
+ timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
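+
+ /* For example (hypothetical values, for illustration only): tRC=0x28,
+ * tRFC=0x1c, tRAS=0x17 and tRP=0x0b pack into reg_100220 = 0x281c170b. */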
+
+ /* XXX: I don't trust the -1's and +1's... they must come
+ * from somewhere! */
+ timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+ tUNK_18 << 16 |
+ (tUNK_1 + tUNK_19 + 1) << 8 |
+ (tUNK_2 - 1));
+
+ timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+ if (recordlen > 19) {
+ timing->reg_100228 += (tUNK_19 - 1) << 24;
+ } else {
+ timing->reg_100228 += tUNK_12 << 24;
+ }
+
+ /* XXX: reg_10022c */
+
+ timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+ tUNK_13 << 8 | tUNK_13);
+
+ /* XXX: +6? */
+ timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
+ if (tUNK_10 > tUNK_11) {
+ timing->reg_100234 += tUNK_10 << 16;
+ } else {
+ timing->reg_100234 += tUNK_11 << 16;
+ }
+
+ /* XXX: reg_100238, reg_10023c */
+ NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+ timing->reg_100220, timing->reg_100224,
+ timing->reg_100228, timing->reg_10022c);
+ NV_DEBUG(dev, " 230: %08x %08x %08x %08x\n",
+ timing->reg_100230, timing->reg_100234,
+ timing->reg_100238, timing->reg_10023c);
+ }
+
+ memtimings->nr_timing = entries;
+ memtimings->supported = true;
+}
+
+void
+nouveau_mem_timing_fini(struct drm_device *dev)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+
+ kfree(mem->timing);
+}