commit 8a524209fce67d3b6d2e831b5dad4eced796ce98
Author: Eric Anholt <eric@anholt.net>
Date: Mon Sep 1 16:45:29 2008 -0700
i915: Use struct_mutex to protect ring in GEM mode.
In the conversion for GEM, we had stopped using the hardware lock to protect
ring usage, since it was all internal to the DRM now. However, some paths
weren't converted to using struct_mutex to prevent multiple threads from
concurrently working on the ring, in particular between the vblank swap handler
and ioctls.
Signed-off-by: Eric Anholt <eric@anholt.net>
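
Not part of the patch: a minimal sketch of the locking pattern the ioctl paths below adopt, with hypothetical names (my_dispatch, my_flush_ioctl) standing in for the real i915 entry points. Each ioctl skips the legacy hardware-lock check once GEM owns the ring and instead serializes the actual ring writes with dev->struct_mutex; the interrupt handler cannot take a mutex, so when GEM is active the vblank swap work is deferred to a work item that acquires struct_mutex in process context.

static int my_dispatch(struct drm_device *dev)
{
	/* Writes to the ring; caller must hold dev->struct_mutex. */
	return 0;
}

static int my_flush_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	int ret;

	/* Legacy path only: fall back to the hardware lock when GEM
	 * has not taken over the ring (ring.ring_obj == NULL). */
	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);

	mutex_lock(&dev->struct_mutex);
	ret = my_dispatch(dev);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}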
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 205d21e..25f59c1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -588,9 +588,15 @@ static int i915_quiescent(struct drm_device * dev)
static int i915_flush_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ int ret;
+
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- return i915_quiescent(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_quiescent(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -611,14 +617,16 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
batch->start, batch->used, batch->num_cliprects);
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
batch->num_cliprects *
sizeof(struct drm_clip_rect)))
return -EFAULT;
+ mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_batchbuffer(dev, batch);
+ mutex_unlock(&dev->struct_mutex);
sarea_priv->last_dispatch = (int)hw_status[5];
return ret;
@@ -637,7 +645,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (cmdbuf->num_cliprects &&
DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -647,7 +655,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
return -EFAULT;
}
+ mutex_lock(&dev->struct_mutex);
ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+ mutex_unlock(&dev->struct_mutex);
if (ret) {
DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
return ret;
@@ -660,11 +670,17 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
static int i915_flip_bufs(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
+ int ret;
+
DRM_DEBUG("%s\n", __FUNCTION__);
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
- return i915_dispatch_flip(dev);
+ mutex_lock(&dev->struct_mutex);
+ ret = i915_dispatch_flip(dev);
+ mutex_unlock(&dev->struct_mutex);
+
+ return ret;
}
static int i915_getparam(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 87b071a..8547f0a 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -285,6 +285,9 @@ typedef struct drm_i915_private {
*/
struct delayed_work retire_work;
+ /** Work task for vblank-related ring access */
+ struct work_struct vblank_work;
+
uint32_t next_gem_seqno;
/**
@@ -435,6 +438,7 @@ extern int i915_irq_wait(struct drm_device *dev, void *data,
void i915_user_irq_get(struct drm_device *dev);
void i915_user_irq_put(struct drm_device *dev);
+extern void i915_gem_vblank_work_handler(struct work_struct *work);
extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS);
extern void i915_driver_irq_preinstall(struct drm_device * dev);
extern int i915_driver_irq_postinstall(struct drm_device *dev);
@@ -538,6 +542,17 @@ extern void intel_opregion_free(struct drm_device *dev);
extern void opregion_asle_intr(struct drm_device *dev);
extern void opregion_enable_asle(struct drm_device *dev);
+/**
+ * Lock test for when it's just for synchronization of ring access.
+ *
+ * In that case, we don't need to do it when GEM is initialized as nobody else
+ * has access to the ring.
+ */
+#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
+ if (((drm_i915_private_t *)dev->dev_private)->ring.ring_obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file_priv); \
+} while (0)
+
#define I915_READ(reg) DRM_READ32(dev_priv->mmio_map, (reg))
#define I915_WRITE(reg,val) DRM_WRITE32(dev_priv->mmio_map, (reg), (val))
#define I915_READ16(reg) DRM_READ16(dev_priv->mmio_map, (reg))
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 90ae8a0..bb6e5a3 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2491,6 +2491,8 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.request_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
+ INIT_WORK(&dev_priv->mm.vblank_work,
+ i915_gem_vblank_work_handler);
dev_priv->mm.next_gem_seqno = 1;
i915_gem_detect_bit_6_swizzle(dev);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f295bdf..d04c526 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -349,6 +349,21 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
return count;
}
+void
+i915_gem_vblank_work_handler(struct work_struct *work)
+{
+ drm_i915_private_t *dev_priv;
+ struct drm_device *dev;
+
+ dev_priv = container_of(work, drm_i915_private_t,
+ mm.vblank_work);
+ dev = dev_priv->dev;
+
+ mutex_lock(&dev->struct_mutex);
+ i915_vblank_tasklet(dev);
+ mutex_unlock(&dev->struct_mutex);
+}
+
irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
{
struct drm_device *dev = (struct drm_device *) arg;
@@ -422,8 +437,12 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
if (iir & I915_ASLE_INTERRUPT)
opregion_asle_intr(dev);
- if (vblank && dev_priv->swaps_pending > 0)
- drm_locked_tasklet(dev, i915_vblank_tasklet);
+ if (vblank && dev_priv->swaps_pending > 0) {
+ if (dev_priv->ring.ring_obj == NULL)
+ drm_locked_tasklet(dev, i915_vblank_tasklet);
+ else
+ schedule_work(&dev_priv->mm.vblank_work);
+ }
return IRQ_HANDLED;
}
@@ -514,14 +533,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- LOCK_TEST_WITH_RETURN(dev, file_priv);
+ RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
if (!dev_priv) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
-
+ mutex_lock(&dev->struct_mutex);
result = i915_emit_irq(dev);
+ mutex_unlock(&dev->struct_mutex);
if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
DRM_ERROR("copy_to_user\n");