#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

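/*
 * Ring identifiers: render (RCS), video/BSD (VCS) and blitter (BCS)
 * command streamers, matching the RING_RENDER/RING_BSD/RING_BLT ids
 * below. I915_NUM_RINGS counts the rings and sizes per-ring arrays,
 * e.g. sync_seqno[] uses I915_NUM_RINGS-1 (one slot per *other* ring).
 */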
enum {
	RCS = 0x0,
	VCS,
	BCS,
	I915_NUM_RINGS,
};

struct intel_hw_status_page {
	u32 __iomem	*page_addr;
	unsigned int	gfx_addr;
	struct drm_i915_gem_object *obj;
};

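/*
 * Per-ring register accessors. Each ring's registers sit at a fixed
 * offset from ring->mmio_base, so e.g. I915_READ_CTL(ring) expands to
 * i915_safe_read(dev_priv, RING_CTL(ring->mmio_base)). Note that the
 * read macros assume a dev_priv variable is in scope at the call site.
 */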
#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)

#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)

#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)

#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)

#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)

#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))

struct intel_ring_buffer {
	const char	*name;
	enum intel_ring_id {
		RING_RENDER = 0x1,
		RING_BSD = 0x2,
		RING_BLT = 0x4,
	} id;
	u32		mmio_base;
	void		*virtual_start;
	struct drm_device *dev;
	struct drm_i915_gem_object *obj;

	unsigned int	head;
	unsigned int	tail;
	int		space;
	int		size;
	struct intel_hw_status_page status_page;

	u32		irq_seqno;	/* last seq seen at irq time */
	u32		waiting_seqno;
	u32		sync_seqno[I915_NUM_RINGS-1];
	u32		irq_refcount;
	void		(*irq_get)(struct intel_ring_buffer *ring);
	void		(*irq_put)(struct intel_ring_buffer *ring);

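	/*
	 * Per-ring hooks, filled in by the intel_init_*_ring_buffer()
	 * functions declared at the bottom of this header; callers go
	 * through these rather than touching the hardware directly.
	 */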
	int		(*init)(struct intel_ring_buffer *ring);

	void		(*write_tail)(struct intel_ring_buffer *ring,
				      u32 value);
	void		(*flush)(struct intel_ring_buffer *ring,
				 u32 invalidate_domains,
				 u32 flush_domains);
	int		(*add_request)(struct intel_ring_buffer *ring,
				       u32 *seqno);
	u32		(*get_seqno)(struct intel_ring_buffer *ring);
	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
					       u32 offset, u32 length);
	void		(*cleanup)(struct intel_ring_buffer *ring);

	/**
	 * List of objects currently involved in rendering from the
	 * ringbuffer.
	 *
	 * Includes buffers having the contents of their GPU caches
	 * flushed, not necessarily primitives. last_rendering_seqno
	 * represents when the rendering involved will be completed.
	 *
	 * A reference is held on the buffer while on this list.
	 */
	struct list_head active_list;

	/**
	 * List of breadcrumbs associated with GPU requests currently
	 * outstanding.
	 */
	struct list_head request_list;

	/**
	 * List of objects currently pending a GPU write flush.
	 *
	 * All elements on this list will also belong to either the
	 * active_list or the flushing_list; last_rendering_seqno can
	 * be used to differentiate between the two.
	 */
	struct list_head gpu_write_list;

	/**
	 * Do we have an outstanding request that has not yet been emitted?
	 */
	u32 outstanding_lazy_request;

	wait_queue_head_t irq_queue;
	drm_local_map_t map;

	void *private;
};

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
		      struct intel_ring_buffer *other)
{
	int idx;

	/*
	 * cs -> 0 = vcs, 1 = bcs
	 * vcs -> 0 = bcs, 1 = cs
	 * bcs -> 0 = cs, 1 = vcs
	 */
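	/*
	 * Illustrative example (assuming the rings live in one array
	 * indexed RCS, VCS, BCS, which the pointer arithmetic below
	 * relies on): sync_index(rcs, bcs) = (2 - 0) - 1 = 1, and
	 * sync_index(bcs, rcs) = (0 - 2) - 1 + I915_NUM_RINGS = 0,
	 * matching the table above.
	 */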

	idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;

	return idx;
}

131static inline u32
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800132intel_read_status_page(struct intel_ring_buffer *ring,
Chris Wilson78501ea2010-10-27 12:18:21 +0100133 int reg)
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800134{
Chris Wilson78501ea2010-10-27 12:18:21 +0100135 return ioread32(ring->status_page.page_addr + reg);
Zou Nan hai8187a2b2010-05-21 09:08:55 +0800136}
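
/*
 * Note that reg is a dword index into the status page, not a byte
 * offset, since page_addr is a u32 pointer. A sketch of typical use:
 * a get_seqno() hook might do
 *
 *	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 *
 * where I915_GEM_HWS_INDEX is the slot the breadcrumb writes land in.
 */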

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);

static inline void intel_ring_emit(struct intel_ring_buffer *ring,
				   u32 data)
{
	iowrite32(data, ring->virtual_start + ring->tail);
	ring->tail += 4;
}

void intel_ring_advance(struct intel_ring_buffer *ring);
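
/*
 * The emission helpers above follow a begin/emit/advance pattern; a
 * minimal sketch (MI_NOOP stands in for real commands):
 *
 *	if (intel_ring_begin(ring, 2) == 0) {
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_emit(ring, MI_NOOP);
 *		intel_ring_advance(ring);
 *	}
 *
 * intel_ring_begin() reserves space for n dwords (waiting for the GPU
 * to free space if necessary), each intel_ring_emit() writes one dword
 * at the current tail, and intel_ring_advance() publishes the new tail
 * to the hardware through the ring's write_tail() hook.
 */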

u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
int intel_ring_sync(struct intel_ring_buffer *ring,
		    struct intel_ring_buffer *to,
		    u32 seqno);
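
/*
 * intel_ring_sync() couples two rings through the per-ring sync
 * registers (read via I915_READ_SYNC_0/1 above); sync_seqno[] and
 * intel_ring_sync_index() track the last seqno synced against each
 * of the other rings.
 */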
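/*
 * One initializer per engine; each sets up the corresponding
 * struct intel_ring_buffer, including the hooks above, for the
 * hardware generation being driven.
 */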
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

#endif /* _INTEL_RINGBUFFER_H_ */