
Commit dda7e67

coherent: tighten up the API
This patch adds documentation to the coherent API header and cleans up several potentially harmful operations. Specifically, it isn't a good idea to modify memory via uncached addresses and then write back caches, because the latter can overwrite the uncached writes. It only works because there is no data in the cache at that point, which also makes that call redundant. Similarly, we are sure not to have any data in caches when obtaining access to the memory, because otherwise our spinlock-acquisition operation could be overwritten by a speculative write back.

Signed-off-by: Guennadi Liakhovetski <guennadi.liakhovetski@linux.intel.com>
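Note: as a purely illustrative sketch of the hazard described above (not code from this patch; the struct coherent fields and the dcache/uncache helpers are those used in the diff below, while spinlock_init() and the wrapper function itself are assumed here for illustration), the init-time sequence looks roughly like this:

/*
 * Hypothetical sketch: why initialization now only invalidates.
 * "obj" is a __coherent object reached through its uncached alias.
 */
static inline void coherent_init_sketch(struct coherent *obj)
{
	/* the header is set up entirely through the uncached alias */
	spinlock_init(&obj->lock);
	obj->shared = false;
	obj->core = cpu_get_id();
	list_init(&obj->list);

	/*
	 * Harmful (old behaviour): writing back the cached alias here could
	 * push a stale cache line on top of the uncached stores above.
	 *
	 * dcache_writeback_invalidate_region(uncache_to_cache(obj), sizeof(*obj));
	 *
	 * Sufficient (new behaviour): by the API's own assumption no core
	 * holds cache lines for this region yet, so dropping any such lines
	 * is all that is needed - there is nothing dirty to write back.
	 */
	dcache_invalidate_region(uncache_to_cache(obj), sizeof(*obj));
}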
1 parent 40d2f66 commit dda7e67


src/include/sof/coherent.h

Lines changed: 32 additions & 10 deletions
@@ -20,11 +20,37 @@
 #define __coherent __attribute__((packed, aligned(DCACHE_LINE_SIZE)))
 
 /*
+ * The coherent API allows optimized access to memory by multiple cores, using
+ * the cache while taking care of coherence. The intended use is to let cores
+ * acquire ownership of such shared objects, use them, and then release them,
+ * possibly to be re-acquired by other cores. Such shared objects must only be
+ * accessed via this API. It is designed primarily for dynamically allocated
+ * objects because of their well-defined life span. It can also be used with
+ * objects from the .data or .bss sections, but greater care must be taken with
+ * them to strictly follow the API flow.
+ *
+ * The API assumes that initially no core has cache lines associated
+ * with the memory area used with it. That is true for dynamically allocated
+ * memory, because when such memory is freed, its cache is invalidated - as long
+ * as that memory was never accessed by other cores, except by using this API.
+ * The first call must be coherent_init(), which initializes the header. If the
+ * object will be used by multiple cores, coherent_shared() must be called next.
+ * After that, to use the memory, coherent_acquire_irq() or coherent_acquire()
+ * must be called, which acquires ownership of the object and returns a cached
+ * address of the memory. After that the user can perform cached access to the
+ * memory. To release the memory, one of coherent_release_irq() or
+ * coherent_release() must be called. The only time the memory is accessed
+ * through the cache is between those two calls, so only when releasing the
+ * memory do we have to write back and invalidate caches to make sure that the
+ * next time we acquire this memory, our uncached header access will not be
+ * overwritten! When the memory is not needed any more, typically before
+ * freeing it, coherent_free() should be called.
+ *
  * This structure needs to be embedded at the start of any container to ensure
  * container object cache alignment and to minimise non cache access when
  * acquiring ownership.
  *
- * This structure should not be accessed outside of these APIs.
+ * This structure must not be accessed outside of these APIs.
  * The shared flag is only set at coherent init and thereafter it's RO.
  */
 struct coherent {
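Note: as a quick orientation for the flow described in the new comment, a hypothetical usage sketch. struct my_obj, its payload field and my_obj_use() are made up for illustration; the coherent_* calls and their (object, member) arguments follow the macros and prototypes in this header, but the exact calling conventions should be checked against the header itself:

struct my_obj {
	struct coherent c;	/* the coherent header must be the first member */
	int payload;
};

static void my_obj_use(struct my_obj *obj)	/* obj: uncached, dynamically allocated */
{
	struct my_obj *cached;

	coherent_init(obj, c);			/* 1. initialize the header */
	coherent_shared(obj, c);		/* 2. only if used by several cores */

	/*
	 * 3. acquire ownership; a cached alias is returned. Since the header
	 * is the first member, the returned pointer also addresses the
	 * container.
	 */
	cached = (struct my_obj *)coherent_acquire(&obj->c, sizeof(*obj));

	cached->payload = 42;			/* 4. cached access, only between acquire and release */

	coherent_release(&cached->c, sizeof(*obj));	/* 5. write back + invalidate, give up ownership */

	coherent_free(obj, c);			/* 6. before freeing the memory */
}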
@@ -58,12 +84,8 @@ struct coherent {
 #define CHECK_COHERENT_IRQ(_c)
 #endif
 
-/*
- * Incoherent devices require manual cache invalidation and writeback as
- * well as locking to manage shared access.
- */
-
 #if CONFIG_INCOHERENT
+/* When coherent_acquire() is called, we are sure not to have cache for this memory */
 __must_check static inline struct coherent *coherent_acquire(struct coherent *c,
							     const size_t size)
 {
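Note: a paraphrased sketch of what that invariant buys on the acquire path. This is an assumption-laden illustration of the idea, not the function body from this file (hence the _sketch suffix); it only reuses names visible in the diff:

__must_check static inline struct coherent *coherent_acquire_sketch(struct coherent *c,
								     const size_t size)
{
	/*
	 * c is the uncached alias; per the API contract no core holds cache
	 * lines for this region before acquisition, so the spinlock taken
	 * below cannot be clobbered later by a write-back of stale lines.
	 */
	if (c->shared) {
		spin_lock(&c->lock);
		c->core = cpu_get_id();
		/* drop any speculatively fetched lines before cached use */
		dcache_invalidate_region(uncache_to_cache(c), size);
	}

	/* from here on the caller works through the cached alias */
	return uncache_to_cache(c);
}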
@@ -158,8 +180,8 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si
 	object->member.shared = false; \
 	object->member.core = cpu_get_id(); \
 	list_init(&object->member.list); \
-	/* wtb and inv local data to coherent object */ \
-	dcache_writeback_invalidate_region(uncache_to_cache(object), sizeof(*object)); \
+	/* inv local data to coherent object */ \
+	dcache_invalidate_region(uncache_to_cache(object), sizeof(*object)); \
 } while (0)
 
 #define coherent_free(object, member) \
@@ -177,7 +199,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si
 	ADDR_IS_COHERENT(object); \
 	spin_lock(&(object)->member.lock); \
 	(object)->member.shared = true; \
-	dcache_writeback_invalidate_region(object, sizeof(*object)); \
+	dcache_invalidate_region(object, sizeof(*object)); \
 	spin_unlock(&(object)->member.lock); \
 } while (0)
 
@@ -188,7 +210,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si
 	ADDR_IS_COHERENT(object); \
 	spin_lock_irq(&(object)->member.lock, &(object)->member.flags); \
 	(object)->member.shared = true; \
-	dcache_writeback_invalidate_region(object, sizeof(*object)); \
+	dcache_invalidate_region(object, sizeof(*object)); \
 	spin_unlock_irq(&(object)->member.lock, &(object)->member.flags); \
 } while (0)
 #else
