-
Notifications
You must be signed in to change notification settings - Fork 324
perf(markdown): cache syntax highlighting results for code blocks #2119
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,72 @@ | ||
| package markdown | ||
|
|
||
| import "container/list" | ||
|
|
||
// lruCache is a simple LRU (Least Recently Used) cache with a fixed maximum size.
// It is NOT safe for concurrent use; callers must provide their own synchronization.
type lruCache[K comparable, V any] struct {
	maxSize int                 // capacity bound; put evicts via evictOldest before exceeding it
	items   map[K]*list.Element // key -> list node; each node's Value is a *lruEntry[K, V]
	order   *list.List          // front = most recently used, back = least recently used
}
|
|
||
// lruEntry is the payload stored in each list element. The key is kept
// alongside the value so eviction can delete the map entry without scanning.
type lruEntry[K comparable, V any] struct {
	key   K
	value V
}
|
|
||
| // newLRUCache creates an LRU cache that holds at most maxSize entries. | ||
| func newLRUCache[K comparable, V any](maxSize int) *lruCache[K, V] { | ||
| return &lruCache[K, V]{ | ||
| maxSize: maxSize, | ||
| items: make(map[K]*list.Element, maxSize), | ||
| order: list.New(), | ||
| } | ||
| } | ||
|
|
||
| // get retrieves a value from the cache, promoting it to most-recently-used. | ||
| // Returns the value and true if found, or the zero value and false otherwise. | ||
| func (c *lruCache[K, V]) get(key K) (V, bool) { | ||
| if elem, ok := c.items[key]; ok { | ||
| c.order.MoveToFront(elem) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 🔴 CRITICAL: Data race in concurrent cache reads The The Problem: Multiple goroutines can hold Evidence: The comment on line 6 of this file states: "It is NOT safe for concurrent use; callers must provide their own synchronization." The caller violates this by using Fix Options:
Impact: This will cause undefined behavior and potential crashes when multiple goroutines render markdown concurrently (common in a TUI application). |
||
| return elem.Value.(*lruEntry[K, V]).value, true | ||
| } | ||
| var zero V | ||
| return zero, false | ||
| } | ||
|
|
||
| // put adds or updates a key-value pair in the cache. | ||
| // If the cache is at capacity, the least recently used entry is evicted. | ||
| func (c *lruCache[K, V]) put(key K, value V) { | ||
| if elem, ok := c.items[key]; ok { | ||
| // Update existing entry | ||
| c.order.MoveToFront(elem) | ||
| elem.Value.(*lruEntry[K, V]).value = value | ||
| return | ||
| } | ||
|
|
||
| // Evict if at capacity | ||
| if c.order.Len() >= c.maxSize { | ||
| c.evictOldest() | ||
| } | ||
|
|
||
| entry := &lruEntry[K, V]{key: key, value: value} | ||
| elem := c.order.PushFront(entry) | ||
| c.items[key] = elem | ||
| } | ||
|
|
||
| // clear removes all entries from the cache. | ||
| func (c *lruCache[K, V]) clear() { | ||
| c.items = make(map[K]*list.Element, c.maxSize) | ||
| c.order.Init() | ||
| } | ||
|
|
||
| // evictOldest removes the least recently used entry. | ||
| func (c *lruCache[K, V]) evictOldest() { | ||
| oldest := c.order.Back() | ||
| if oldest == nil { | ||
| return | ||
| } | ||
| c.order.Remove(oldest) | ||
| delete(c.items, oldest.Value.(*lruEntry[K, V]).key) | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,130 @@ | ||
| package markdown | ||
|
|
||
| import "testing" | ||
|
|
||
| func TestLRUCache_BasicGetPut(t *testing.T) { | ||
| c := newLRUCache[string, int](3) | ||
|
|
||
| c.put("a", 1) | ||
| c.put("b", 2) | ||
| c.put("c", 3) | ||
|
|
||
| v, ok := c.get("a") | ||
| if !ok || v != 1 { | ||
| t.Fatalf("expected (1, true), got (%d, %v)", v, ok) | ||
| } | ||
| v, ok = c.get("b") | ||
| if !ok || v != 2 { | ||
| t.Fatalf("expected (2, true), got (%d, %v)", v, ok) | ||
| } | ||
| v, ok = c.get("c") | ||
| if !ok || v != 3 { | ||
| t.Fatalf("expected (3, true), got (%d, %v)", v, ok) | ||
| } | ||
| } | ||
|
|
||
| func TestLRUCache_Miss(t *testing.T) { | ||
| c := newLRUCache[string, int](2) | ||
|
|
||
| _, ok := c.get("missing") | ||
| if ok { | ||
| t.Fatal("expected miss for non-existent key") | ||
| } | ||
| } | ||
|
|
||
| func TestLRUCache_Eviction(t *testing.T) { | ||
| c := newLRUCache[string, int](2) | ||
|
|
||
| c.put("a", 1) | ||
| c.put("b", 2) | ||
| // Cache is full: [b, a] (b is most recent) | ||
|
|
||
| c.put("c", 3) | ||
| // "a" should be evicted as least recently used: [c, b] | ||
|
|
||
| _, ok := c.get("a") | ||
| if ok { | ||
| t.Fatal("expected 'a' to be evicted") | ||
| } | ||
|
|
||
| v, ok := c.get("b") | ||
| if !ok || v != 2 { | ||
| t.Fatalf("expected (2, true), got (%d, %v)", v, ok) | ||
| } | ||
| v, ok = c.get("c") | ||
| if !ok || v != 3 { | ||
| t.Fatalf("expected (3, true), got (%d, %v)", v, ok) | ||
| } | ||
| } | ||
|
|
||
| func TestLRUCache_GetPromotesEntry(t *testing.T) { | ||
| c := newLRUCache[string, int](2) | ||
|
|
||
| c.put("a", 1) | ||
| c.put("b", 2) | ||
| // [b, a] | ||
|
|
||
| // Access "a" to promote it | ||
| c.get("a") | ||
| // Now [a, b] | ||
|
|
||
| // Add "c" - should evict "b" (now least recently used) | ||
| c.put("c", 3) | ||
|
|
||
| _, ok := c.get("b") | ||
| if ok { | ||
| t.Fatal("expected 'b' to be evicted after 'a' was promoted") | ||
| } | ||
|
|
||
| v, ok := c.get("a") | ||
| if !ok || v != 1 { | ||
| t.Fatalf("expected (1, true), got (%d, %v)", v, ok) | ||
| } | ||
| } | ||
|
|
||
| func TestLRUCache_UpdateExistingKey(t *testing.T) { | ||
| c := newLRUCache[string, int](2) | ||
|
|
||
| c.put("a", 1) | ||
| c.put("b", 2) | ||
|
|
||
| // Update "a" | ||
| c.put("a", 10) | ||
|
|
||
| v, ok := c.get("a") | ||
| if !ok || v != 10 { | ||
| t.Fatalf("expected (10, true), got (%d, %v)", v, ok) | ||
| } | ||
|
|
||
| // "a" was promoted by the update, so adding "c" should evict "b" | ||
| c.put("c", 3) | ||
| _, ok = c.get("b") | ||
| if ok { | ||
| t.Fatal("expected 'b' to be evicted") | ||
| } | ||
| } | ||
|
|
||
| func TestLRUCache_Clear(t *testing.T) { | ||
| c := newLRUCache[string, int](3) | ||
|
|
||
| c.put("a", 1) | ||
| c.put("b", 2) | ||
|
|
||
| c.clear() | ||
|
|
||
| _, ok := c.get("a") | ||
| if ok { | ||
| t.Fatal("expected empty cache after clear") | ||
| } | ||
| _, ok = c.get("b") | ||
| if ok { | ||
| t.Fatal("expected empty cache after clear") | ||
| } | ||
|
|
||
| // Should work normally after clear | ||
| c.put("c", 3) | ||
| v, ok := c.get("c") | ||
| if !ok || v != 3 { | ||
| t.Fatalf("expected (3, true), got (%d, %v)", v, ok) | ||
| } | ||
| } |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The cached `[]token` slice is returned directly to the caller without copying. This means both the cache and the caller hold references to the same underlying array. Potential issues:
Current Risk: Low - the code appears to only read the tokens during rendering, not modify them. However, this is a latent bug that could be triggered by future code changes.
Recommended Fix: Return a copy of the slice:
Alternatively, document that the returned slice must not be modified and add a comment to that effect.