Skip to content

Commit 519f490

Browse files
dma-buf/sync-file: fix warning about fence containers
The dma_fence_chain containers can show up in sync_files as well, resulting in warnings that those can't be added to dma_fence_array containers when merging multiple sync_files together. Solve this by using the dma_fence_unwrap iterator to deep dive into the contained fences and then add those, flattened out, into a dma_fence_array. Signed-off-by: Christian König <christian.koenig@amd.com> Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch> Link: https://patchwork.freedesktop.org/patch/msgid/20220311110244.1245-2-christian.koenig@amd.com
1 parent 64a8f92 commit 519f490

1 file changed

Lines changed: 73 additions & 68 deletions

File tree

drivers/dma-buf/sync_file.c

Lines changed: 73 additions & 68 deletions
Original file line number | Diff line number | Diff line change
@@ -5,6 +5,7 @@
55
* Copyright (C) 2012 Google, Inc.
66
*/
77

8+
#include <linux/dma-fence-unwrap.h>
89
#include <linux/export.h>
910
#include <linux/file.h>
1011
#include <linux/fs.h>
@@ -172,20 +173,6 @@ static int sync_file_set_fence(struct sync_file *sync_file,
172173
return 0;
173174
}
174175

175-
static struct dma_fence **get_fences(struct sync_file *sync_file,
176-
int *num_fences)
177-
{
178-
if (dma_fence_is_array(sync_file->fence)) {
179-
struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
180-
181-
*num_fences = array->num_fences;
182-
return array->fences;
183-
}
184-
185-
*num_fences = 1;
186-
return &sync_file->fence;
187-
}
188-
189176
static void add_fence(struct dma_fence **fences,
190177
int *i, struct dma_fence *fence)
191178
{
@@ -210,86 +197,97 @@ static void add_fence(struct dma_fence **fences,
210197
static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
211198
struct sync_file *b)
212199
{
200+
struct dma_fence *a_fence, *b_fence, **fences;
201+
struct dma_fence_unwrap a_iter, b_iter;
202+
unsigned int index, num_fences;
213203
struct sync_file *sync_file;
214-
struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences;
215-
int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences;
216204

217205
sync_file = sync_file_alloc();
218206
if (!sync_file)
219207
return NULL;
220208

221-
a_fences = get_fences(a, &a_num_fences);
222-
b_fences = get_fences(b, &b_num_fences);
223-
if (a_num_fences > INT_MAX - b_num_fences)
224-
goto err;
209+
num_fences = 0;
210+
dma_fence_unwrap_for_each(a_fence, &a_iter, a->fence)
211+
++num_fences;
212+
dma_fence_unwrap_for_each(b_fence, &b_iter, b->fence)
213+
++num_fences;
225214

226-
num_fences = a_num_fences + b_num_fences;
215+
if (num_fences > INT_MAX)
216+
goto err_free_sync_file;
227217

228218
fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
229219
if (!fences)
230-
goto err;
220+
goto err_free_sync_file;
231221

232222
/*
233-
* Assume sync_file a and b are both ordered and have no
234-
* duplicates with the same context.
223+
* We can't guarantee that fences in both a and b are ordered, but it is
224+
* still quite likely.
235225
*
236-
* If a sync_file can only be created with sync_file_merge
237-
* and sync_file_create, this is a reasonable assumption.
226+
* So attempt to order the fences as we pass over them and merge fences
227+
* with the same context.
238228
*/
239-
for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) {
240-
struct dma_fence *pt_a = a_fences[i_a];
241-
struct dma_fence *pt_b = b_fences[i_b];
242229

243-
if (pt_a->context < pt_b->context) {
244-
add_fence(fences, &i, pt_a);
230+
index = 0;
231+
for (a_fence = dma_fence_unwrap_first(a->fence, &a_iter),
232+
b_fence = dma_fence_unwrap_first(b->fence, &b_iter);
233+
a_fence || b_fence; ) {
234+
235+
if (!b_fence) {
236+
add_fence(fences, &index, a_fence);
237+
a_fence = dma_fence_unwrap_next(&a_iter);
238+
239+
} else if (!a_fence) {
240+
add_fence(fences, &index, b_fence);
241+
b_fence = dma_fence_unwrap_next(&b_iter);
242+
243+
} else if (a_fence->context < b_fence->context) {
244+
add_fence(fences, &index, a_fence);
245+
a_fence = dma_fence_unwrap_next(&a_iter);
245246

246-
i_a++;
247-
} else if (pt_a->context > pt_b->context) {
248-
add_fence(fences, &i, pt_b);
247+
} else if (b_fence->context < a_fence->context) {
248+
add_fence(fences, &index, b_fence);
249+
b_fence = dma_fence_unwrap_next(&b_iter);
250+
251+
} else if (__dma_fence_is_later(a_fence->seqno, b_fence->seqno,
252+
a_fence->ops)) {
253+
add_fence(fences, &index, a_fence);
254+
a_fence = dma_fence_unwrap_next(&a_iter);
255+
b_fence = dma_fence_unwrap_next(&b_iter);
249256

250-
i_b++;
251257
} else {
252-
if (__dma_fence_is_later(pt_a->seqno, pt_b->seqno,
253-
pt_a->ops))
254-
add_fence(fences, &i, pt_a);
255-
else
256-
add_fence(fences, &i, pt_b);
257-
258-
i_a++;
259-
i_b++;
258+
add_fence(fences, &index, b_fence);
259+
a_fence = dma_fence_unwrap_next(&a_iter);
260+
b_fence = dma_fence_unwrap_next(&b_iter);
260261
}
261262
}
262263

263-
for (; i_a < a_num_fences; i_a++)
264-
add_fence(fences, &i, a_fences[i_a]);
265-
266-
for (; i_b < b_num_fences; i_b++)
267-
add_fence(fences, &i, b_fences[i_b]);
268-
269-
if (i == 0)
270-
fences[i++] = dma_fence_get(a_fences[0]);
264+
if (index == 0)
265+
add_fence(fences, &index, dma_fence_get_stub());
271266

272-
if (num_fences > i) {
273-
nfences = krealloc_array(fences, i, sizeof(*fences), GFP_KERNEL);
274-
if (!nfences)
275-
goto err;
267+
if (num_fences > index) {
268+
struct dma_fence **tmp;
276269

277-
fences = nfences;
270+
/* Keep going even when reducing the size failed */
271+
tmp = krealloc_array(fences, index, sizeof(*fences),
272+
GFP_KERNEL);
273+
if (tmp)
274+
fences = tmp;
278275
}
279276

280-
if (sync_file_set_fence(sync_file, fences, i) < 0)
281-
goto err;
277+
if (sync_file_set_fence(sync_file, fences, index) < 0)
278+
goto err_put_fences;
282279

283280
strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name));
284281
return sync_file;
285282

286-
err:
287-
while (i)
288-
dma_fence_put(fences[--i]);
283+
err_put_fences:
284+
while (index)
285+
dma_fence_put(fences[--index]);
289286
kfree(fences);
287+
288+
err_free_sync_file:
290289
fput(sync_file->file);
291290
return NULL;
292-
293291
}
294292

295293
static int sync_file_release(struct inode *inode, struct file *file)
@@ -398,19 +396,23 @@ static int sync_fill_fence_info(struct dma_fence *fence,
398396
static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
399397
unsigned long arg)
400398
{
401-
struct sync_file_info info;
402399
struct sync_fence_info *fence_info = NULL;
403-
struct dma_fence **fences;
400+
struct dma_fence_unwrap iter;
401+
struct sync_file_info info;
402+
unsigned int num_fences;
403+
struct dma_fence *fence;
404+
int ret;
404405
__u32 size;
405-
int num_fences, ret, i;
406406

407407
if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
408408
return -EFAULT;
409409

410410
if (info.flags || info.pad)
411411
return -EINVAL;
412412

413-
fences = get_fences(sync_file, &num_fences);
413+
num_fences = 0;
414+
dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
415+
++num_fences;
414416

415417
/*
416418
* Passing num_fences = 0 means that userspace doesn't want to
@@ -433,8 +435,11 @@ static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
433435
if (!fence_info)
434436
return -ENOMEM;
435437

436-
for (i = 0; i < num_fences; i++) {
437-
int status = sync_fill_fence_info(fences[i], &fence_info[i]);
438+
num_fences = 0;
439+
dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
440+
int status;
441+
442+
status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
438443
info.status = info.status <= 0 ? info.status : status;
439444
}
440445

0 commit comments

Comments (0)