/*
 * rv32emu is freely redistributable under the MIT License. See the file
 * "LICENSE" for information on usage and redistribution of this file.
 */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "cache.h"

#define MIN(a, b) ((a < b) ? a : b)
#define GOLDEN_RATIO_32 0x61C88647
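/* Fibonacci (multiplicative) hashing: multiplying the key by the 32-bit
 * golden-ratio constant scrambles its bits, and the top cache_size_bits bits
 * of the product select the hash bucket.
 */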
#define HASH(val) \
    (((val) * (GOLDEN_RATIO_32)) >> (32 - (cache_size_bits))) & (cache_size - 1)

static uint32_t cache_size, cache_size_bits;

/*
 * Adaptive Replacement Cache (ARC) improves on the basic LRU strategy by
 * splitting the cache into two lists, T1 and T2. List T1 holds entries
 * managed by the LRU (recency) strategy, and list T2 holds entries managed
 * by the LFU (frequency) strategy. In addition, ARC keeps two ghost lists,
 * B1 and B2: entries evicted from the LRU list go into B1, and entries
 * evicted from the LFU list go into B2.
 *
 * Based on hits in B1 and B2, ARC adjusts the target sizes of T1 and T2.
 * A cache hit in B1 indicates that T1 is too small, so T1's target size is
 * increased at the expense of T2. Conversely, a hit in B2 increases T2's
 * target size and decreases T1's.
 */
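/*
 * Illustrative usage sketch of the API defined below ("block" and
 * block_free() are hypothetical caller-side names):
 *
 *     cache_t *c = cache_create(10);        // 2^10 = 1024 entries
 *     void *old = cache_put(c, pc, block);  // returns an evicted value or NULL
 *     if (old)
 *         block_free(old);
 *     block_t *hit = cache_get(c, pc);      // NULL on a miss
 *     cache_free(c, block_free);            // callback runs on each cached value
 */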
typedef enum {
    LRU_list,
    LFU_list,
    LRU_ghost_list,
    LFU_ghost_list,
    N_CACHE_LIST_TYPES
} cache_list_t;

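/* Minimal intrusive doubly linked list, in the style of the Linux kernel's
 * list.h: each arc_entry_t embeds its own link node, so no separate node
 * allocations are needed and insertion/removal are O(1).
 */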
struct list_head {
    struct list_head *prev, *next;
};

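/* Hash-bucket list (also kernel-style): hlist_head is a single pointer per
 * bucket, and each node's pprev points at the previous node's next pointer
 * (or at the bucket head), so a node can unlink itself in O(1) without
 * knowing which bucket it belongs to.
 */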
struct hlist_head {
    struct hlist_node *first;
};

struct hlist_node {
    struct hlist_node *next, **pprev;
};

typedef struct {
    void *value;
    uint32_t key;
    cache_list_t type;
    struct list_head list;
    struct hlist_node ht_list;
} arc_entry_t;

typedef struct {
    struct hlist_head *ht_list_head;
} hashtable_t;

typedef struct cache {
    struct list_head *lists[N_CACHE_LIST_TYPES];
    uint32_t list_size[N_CACHE_LIST_TYPES];
    hashtable_t *map;
    uint32_t capacity;
    uint32_t lru_capacity;
} cache_t;

static inline void INIT_LIST_HEAD(struct list_head *head)
{
    head->next = head;
    head->prev = head;
}

static inline void list_add(struct list_head *node, struct list_head *head)
{
    struct list_head *next = head->next;

    next->prev = node;
    node->next = next;
    node->prev = head;
    head->next = node;
}

static inline void list_del(struct list_head *node)
{
    struct list_head *next = node->next;
    struct list_head *prev = node->prev;

    next->prev = prev;
    prev->next = next;
}

static inline void list_del_init(struct list_head *node)
{
    list_del(node);
    INIT_LIST_HEAD(node);
}

/* Retrieve the enclosing structure from a pointer to one of its members;
 * needed by list_entry()/hlist_entry(). <stddef.h> provides offsetof().
 */
#define container_of(ptr, type, member) \
    ((type *) ((char *) (ptr) - offsetof(type, member)))

#define list_entry(node, type, member) container_of(node, type, member)

#define list_last_entry(head, type, member) \
    list_entry((head)->prev, type, member)

#ifdef __HAVE_TYPEOF
#define list_for_each_entry_safe(entry, safe, head, member)                \
    for (entry = list_entry((head)->next, __typeof__(*entry), member),     \
        safe = list_entry(entry->member.next, __typeof__(*entry), member); \
         &entry->member != (head); entry = safe,                           \
        safe = list_entry(safe->member.next, __typeof__(*entry), member))
#else
#define list_for_each_entry_safe(entry, safe, head, member, type)         \
    for (entry = list_entry((head)->next, type, member),                  \
        safe = list_entry(entry->member.next, type, member);              \
         &entry->member != (head);                                        \
         entry = safe, safe = list_entry(safe->member.next, type, member))
#endif

#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL)

static inline void INIT_HLIST_NODE(struct hlist_node *h)
{
    h->next = NULL;
    h->pprev = NULL;
}

static inline int hlist_empty(const struct hlist_head *h)
{
    return !h->first;
}

static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    struct hlist_node *first = h->first;
    n->next = first;
    if (first)
        first->pprev = &n->next;

    h->first = n;
    n->pprev = &h->first;
}

static inline bool hlist_unhashed(const struct hlist_node *h)
{
    return !h->pprev;
}

static inline void hlist_del(struct hlist_node *n)
{
    struct hlist_node *next = n->next;
    struct hlist_node **pprev = n->pprev;

    *pprev = next;
    if (next)
        next->pprev = pprev;
}

static inline void hlist_del_init(struct hlist_node *n)
{
    if (hlist_unhashed(n))
        return;
    hlist_del(n);
    INIT_HLIST_NODE(n);
}

#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

#ifdef __HAVE_TYPEOF
#define hlist_entry_safe(ptr, type, member)                  \
    ({                                                       \
        typeof(ptr) ____ptr = (ptr);                         \
        ____ptr ? hlist_entry(____ptr, type, member) : NULL; \
    })
#else
#define hlist_entry_safe(ptr, type, member) \
    ((ptr) ? hlist_entry(ptr, type, member) : NULL)
#endif

#ifdef __HAVE_TYPEOF
#define hlist_for_each_entry(pos, head, member)                              \
    for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); pos; \
         pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member))
#else
#define hlist_for_each_entry(pos, head, member, type)              \
    for (pos = hlist_entry_safe((head)->first, type, member); pos; \
         pos = hlist_entry_safe((pos)->member.next, type, member))
#endif

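/* Allocate and initialize a cache able to hold 2^size_bits entries.
 * The LRU target capacity (lru_capacity) starts at half the total capacity
 * and is adapted on ghost-list hits. Returns NULL on allocation failure.
 */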
cache_t *cache_create(int size_bits)
{
    cache_t *cache = malloc(sizeof(cache_t));
    if (!cache)
        return NULL;
    cache_size_bits = size_bits;
    cache_size = 1 << size_bits;

    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        cache->lists[i] = malloc(sizeof(struct list_head));
        INIT_LIST_HEAD(cache->lists[i]);
        cache->list_size[i] = 0;
    }

    cache->map = malloc(sizeof(hashtable_t));
    if (!cache->map) {
        for (int i = 0; i < N_CACHE_LIST_TYPES; i++)
            free(cache->lists[i]);
        free(cache);
        return NULL;
    }
    cache->map->ht_list_head = malloc(cache_size * sizeof(struct hlist_head));
    if (!cache->map->ht_list_head) {
        free(cache->map);
        for (int i = 0; i < N_CACHE_LIST_TYPES; i++)
            free(cache->lists[i]);
        free(cache);
        return NULL;
    }
    for (uint32_t i = 0; i < cache_size; i++) {
        INIT_HLIST_HEAD(&cache->map->ht_list_head[i]);
    }

    cache->capacity = cache_size;
    cache->lru_capacity = cache_size / 2;
    return cache;
}

/* Rules of ARC:
 * 1. size of LRU_list + size of LFU_list <= c
 * 2. size of LRU_list + size of LRU_ghost_list <= c
 * 3. size of LFU_list + size of LFU_ghost_list <= 2c
 * 4. size of LRU_list + size of LFU_list + size of LRU_ghost_list + size of
 *    LFU_ghost_list <= 2c
 */
#define CACHE_ASSERT(cache)                                                 \
    assert(cache->list_size[LRU_list] + cache->list_size[LFU_list] <=       \
           cache->capacity);                                                \
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <= \
           cache->capacity);                                                \
    assert(cache->list_size[LFU_list] + cache->list_size[LFU_ghost_list] <= \
           2 * cache->capacity);                                            \
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] +  \
           cache->list_size[LFU_list] +                                     \
           cache->list_size[LFU_ghost_list] <=                              \
           2 * cache->capacity);

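/* Move an entry to the MRU (head) position of the list identified by "type",
 * updating the per-list size counters and the entry's own type tag.
 */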
static inline void move_to_mru(cache_t *cache,
                               arc_entry_t *entry,
                               const cache_list_t type)
{
    cache->list_size[entry->type]--;
    cache->list_size[type]++;
    entry->type = type;
    list_del_init(&entry->list);
    list_add(&entry->list, cache->lists[type]);
}

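/* Enforce the adaptive split between the two resident lists: if the LRU list
 * has reached its target (lru_capacity), demote its LRU-end entry to the LRU
 * ghost list; otherwise, if the LFU list has reached the remaining capacity,
 * demote its LRU-end entry to the LFU ghost list.
 */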
static inline void replace_list(cache_t *cache)
{
    if (cache->list_size[LRU_list] >= cache->lru_capacity)
        move_to_mru(cache,
                    list_last_entry(cache->lists[LRU_list], arc_entry_t, list),
                    LRU_ghost_list);
    else if (cache->list_size[LFU_list] >=
             (cache->capacity - cache->lru_capacity))
        move_to_mru(cache,
                    list_last_entry(cache->lists[LFU_list], arc_entry_t, list),
                    LFU_ghost_list);
}

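/* Look up "key" in the cache. On a hit the entry is promoted according to the
 * ARC rules (resident hits move to the LFU list; ghost hits additionally
 * adjust lru_capacity), and the stored value is returned. Returns NULL on a
 * miss.
 */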
void *cache_get(cache_t *cache, uint32_t key)
{
    if (!cache->capacity || hlist_empty(&cache->map->ht_list_head[HASH(key)]))
        return NULL;

    arc_entry_t *entry = NULL;
#ifdef __HAVE_TYPEOF
    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list)
#else
    hlist_for_each_entry (entry, &cache->map->ht_list_head[HASH(key)], ht_list,
                          arc_entry_t)
#endif
    {
        if (entry->key == key)
            break;
    }
    if (!entry || entry->key != key)
        return NULL;
    if (entry->type == LRU_list) {
        /* cache hit in LRU_list: promote the entry to the LFU list */
        replace_list(cache);
        move_to_mru(cache, entry, LFU_list);
    } else if (entry->type == LFU_list) {
        /* cache hit in LFU_list: refresh its MRU position */
        move_to_mru(cache, entry, LFU_list);
    } else if (entry->type == LRU_ghost_list) {
        /* cache hit in LRU_ghost_list: grow the LRU target, then promote */
        cache->lru_capacity = MIN(cache->lru_capacity + 1, cache->capacity);
        replace_list(cache);
        move_to_mru(cache, entry, LFU_list);
    } else {
        /* cache hit in LFU_ghost_list: shrink the LRU target, then promote */
        cache->lru_capacity = cache->lru_capacity ? cache->lru_capacity - 1 : 0;
        replace_list(cache);
        move_to_mru(cache, entry, LFU_list);
    }

    CACHE_ASSERT(cache);
    /* cache hit: return the stored value */
    return entry->value;
}

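/* Insert "value" under "key". A new entry always enters the LRU list; if the
 * ARC size rules would be violated, one entry is evicted first. Returns the
 * value of the evicted entry (so the caller can release it), or NULL if
 * nothing was evicted.
 */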
void *cache_put(cache_t *cache, uint32_t key, void *value)
{
    void *delete_value = NULL;
    assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <=
           cache->capacity);
    /* Before adding a new element, check the cache state and evict an entry
     * if the ARC size rules require it.
     */
    if ((cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list]) ==
        cache->capacity) {
        if (cache->list_size[LRU_list] < cache->capacity) {
            arc_entry_t *delete_target = list_last_entry(
                cache->lists[LRU_ghost_list], arc_entry_t, list);
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
            free(delete_target);
            cache->list_size[LRU_ghost_list]--;
            replace_list(cache);
        } else {
            arc_entry_t *delete_target =
                list_last_entry(cache->lists[LRU_list], arc_entry_t, list);
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
            free(delete_target);
            cache->list_size[LRU_list]--;
        }
    } else {
        assert(cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] <
               cache->capacity);
        uint32_t size =
            cache->list_size[LRU_list] + cache->list_size[LRU_ghost_list] +
            cache->list_size[LFU_list] + cache->list_size[LFU_ghost_list];
        if (size == cache->capacity * 2) {
            arc_entry_t *delete_target = list_last_entry(
                cache->lists[LFU_ghost_list], arc_entry_t, list);
            list_del_init(&delete_target->list);
            hlist_del_init(&delete_target->ht_list);
            delete_value = delete_target->value;
            free(delete_target);
            cache->list_size[LFU_ghost_list]--;
        }
        replace_list(cache);
    }
    arc_entry_t *new_entry = malloc(sizeof(arc_entry_t));
    new_entry->key = key;
    new_entry->value = value;
    new_entry->type = LRU_list;
    list_add(&new_entry->list, cache->lists[LRU_list]);
    hlist_add_head(&new_entry->ht_list, &cache->map->ht_list_head[HASH(key)]);
    cache->list_size[LRU_list]++;
    CACHE_ASSERT(cache);
    return delete_value;
}

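/* Release the cache. "callback" is invoked once for every value still held in
 * any of the four lists so the caller can free it.
 */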
void cache_free(cache_t *cache, void (*callback)(void *))
{
    for (int i = 0; i < N_CACHE_LIST_TYPES; i++) {
        arc_entry_t *entry, *safe;
#ifdef __HAVE_TYPEOF
        list_for_each_entry_safe (entry, safe, cache->lists[i], list)
#else
        list_for_each_entry_safe (entry, safe, cache->lists[i], list,
                                  arc_entry_t)
#endif
        {
            callback(entry->value);
            free(entry);
        }
        free(cache->lists[i]);
    }
    free(cache->map->ht_list_head);
    free(cache->map);
    free(cache);
}