common/bouncebuf.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>

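/*
 * Flag semantics (declared in bouncebuf.h): GEN_BB_READ means the DMA
 * device reads from the buffer, so the user data is copied into the bounce
 * buffer before the transfer; GEN_BB_WRITE means the device writes to the
 * buffer, so the data is copied back in bounce_buffer_stop().
 */
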
static int addr_aligned(struct bounce_buffer *state)
{
	const ulong align_mask = ARCH_DMA_MINALIGN - 1;

	/* Check if start is aligned */
	if ((ulong)state->user_buffer & align_mask) {
		debug("Unaligned buffer address %p\n", state->user_buffer);
		return 0;
	}

	/* Check if length is aligned */
	if (state->len != state->len_aligned) {
		debug("Unaligned buffer length %zu\n", state->len);
		return 0;
	}

	/* Aligned */
	return 1;
}

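/*
 * Set up a bounce buffer with a caller-chosen alignment and alignment test.
 * If addr_is_aligned() reports the user buffer as already usable, DMA is
 * done in place; otherwise an aligned temporary buffer is allocated and,
 * for GEN_BB_READ transfers, pre-filled from the user buffer. In both
 * cases the region is flushed so the DMA engine sees current data.
 */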
int bounce_buffer_start_extalign(struct bounce_buffer *state, void *data,
				 size_t len, unsigned int flags,
				 size_t alignment,
				 int (*addr_is_aligned)(struct bounce_buffer *state))
{
	state->user_buffer = data;
	state->bounce_buffer = data;
	state->len = len;
	state->len_aligned = roundup(len, alignment);
	state->flags = flags;

	if (!addr_is_aligned(state)) {
		state->bounce_buffer = memalign(alignment,
						state->len_aligned);
		if (!state->bounce_buffer)
			return -ENOMEM;

		if (state->flags & GEN_BB_READ)
			memcpy(state->bounce_buffer, state->user_buffer,
			       state->len);
	}

	/*
	 * Flush data to RAM so DMA reads can pick it up,
	 * and any CPU writebacks don't race with DMA writes
	 */
	flush_dcache_range((unsigned long)state->bounce_buffer,
			   (unsigned long)(state->bounce_buffer) +
				state->len_aligned);

	return 0;
}

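/*
 * Convenience wrapper: same as bounce_buffer_start_extalign(), but uses the
 * architecture's minimum DMA alignment and the default alignment check.
 */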
int bounce_buffer_start(struct bounce_buffer *state, void *data,
			size_t len, unsigned int flags)
{
	return bounce_buffer_start_extalign(state, data, len, flags,
					    ARCH_DMA_MINALIGN,
					    addr_aligned);
}

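/*
 * Complete a transfer: for GEN_BB_WRITE, invalidate the cache over the
 * DMA'd region so the CPU sees the fresh data; if a separate bounce buffer
 * was allocated, copy it back to the user buffer (GEN_BB_WRITE only) and
 * free it.
 */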
int bounce_buffer_stop(struct bounce_buffer *state)
{
	if (state->flags & GEN_BB_WRITE) {
		/* Invalidate cache so that CPU can see any newly DMA'd data */
		invalidate_dcache_range((unsigned long)state->bounce_buffer,
					(unsigned long)(state->bounce_buffer) +
						state->len_aligned);
	}

	if (state->bounce_buffer == state->user_buffer)
		return 0;

	if (state->flags & GEN_BB_WRITE)
		memcpy(state->user_buffer, state->bounce_buffer, state->len);

	free(state->bounce_buffer);

	return 0;
}
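
/*
 * Illustrative usage sketch (not part of the upstream file): a typical
 * caller brackets a DMA transfer with bounce_buffer_start() and
 * bounce_buffer_stop(). dma_transfer_to_device() below is a hypothetical
 * placeholder for whatever starts the controller, not a real U-Boot API,
 * so the example is compiled out.
 */
#if 0
static int example_dma_write(void *data, size_t len)
{
	struct bounce_buffer bbstate;
	int ret;

	/* GEN_BB_READ: the DMA device reads from memory (CPU -> device) */
	ret = bounce_buffer_start(&bbstate, data, len, GEN_BB_READ);
	if (ret)
		return ret;

	/* Hand the (possibly bounced, cache-flushed) buffer to the hardware */
	ret = dma_transfer_to_device(bbstate.bounce_buffer, bbstate.len_aligned);

	/* For GEN_BB_WRITE transfers this would also copy data back */
	bounce_buffer_stop(&bbstate);

	return ret;
}
#endif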