23 changes: 21 additions & 2 deletions daemon/cc_info_timer.go
@@ -45,6 +45,9 @@ type CCInfoTimerService struct {
stopChan chan struct{}
wg sync.WaitGroup

// Guards concurrent fetchRateLimit goroutines
rateLimitFetchMu sync.Mutex

// Git info cache (per working directory)
gitCache map[string]*GitCacheEntry

@@ -160,7 +163,15 @@ func (s *CCInfoTimerService) timerLoop() {
// Fetch immediately on start
s.fetchActiveRanges(context.Background())
s.fetchGitInfo()
go s.fetchRateLimit(context.Background())
go func() {
if !s.rateLimitFetchMu.TryLock() {
return
}
defer s.rateLimitFetchMu.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
s.fetchRateLimit(ctx)
}()
go s.fetchUserProfile(context.Background())

for {
@@ -175,7 +186,15 @@ func (s *CCInfoTimerService) timerLoop() {
}
s.fetchActiveRanges(context.Background())
s.fetchGitInfo()
go s.fetchRateLimit(context.Background())
go func() {
if !s.rateLimitFetchMu.TryLock() {
return
}
defer s.rateLimitFetchMu.Unlock()
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
s.fetchRateLimit(ctx)
}()
Comment on lines +189 to +197 (Contributor, medium):
This block of code for fetching the rate limit is a duplicate of the logic at lines 166-174. To improve maintainability and follow the DRY (Don't Repeat Yourself) principle, consider extracting this logic into a new private method on CCInfoTimerService. You could then call this new method as a goroutine from both locations.
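
A minimal sketch of such a helper, reusing the mutex, timeout, and call already present in the duplicated blocks; the method name fetchRateLimitAsync is illustrative, not part of the change:

// fetchRateLimitAsync launches fetchRateLimit in a goroutine, skipping the
// fetch if a previous one is still in flight.
func (s *CCInfoTimerService) fetchRateLimitAsync() {
    go func() {
        if !s.rateLimitFetchMu.TryLock() {
            return // a previous fetch is still running
        }
        defer s.rateLimitFetchMu.Unlock()
        ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
        defer cancel()
        s.fetchRateLimit(ctx)
    }()
}

Both call sites in timerLoop would then shrink to s.fetchRateLimitAsync().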


case <-s.stopChan:
return
21 changes: 19 additions & 2 deletions daemon/chan.go
@@ -2,7 +2,9 @@ package daemon

import (
"context"
"fmt"
"sync"
"time"

"github.com/lithammer/shortuuid/v3"
"github.com/pkg/errors"
@@ -344,6 +346,9 @@ func (s *subscriber) sendMessageToSubscriber(msg *message.Message, logFields wat
ctx, cancelCtx := context.WithCancel(s.ctx)
defer cancelCtx()

const maxRetries = 3
retryCount := 0

SendToSubscriber:
for {
// copy the message to prevent ack/nack propagation to other consumers
@@ -371,8 +376,20 @@ SendToSubscriber:
s.logger.Trace("Message acked", logFields)
return
case <-msgToSend.Nacked():
s.logger.Trace("Nack received, resending message", logFields)
continue SendToSubscriber
retryCount++
if retryCount > maxRetries {
s.logger.Error("Max retries reached, dropping message", errors.New("max retries reached"), logFields)
return
}
backoff := time.Duration(100<<uint(retryCount-1)) * time.Millisecond
Contributor (medium):
The value 100 used for the initial backoff is a magic number. To improve readability and maintainability, it's better to define it as a named constant, for example: const initialBackoffMillis = 100. The code would then be more self-explanatory: backoff := time.Duration(initialBackoffMillis<<uint(retryCount-1)) * time.Millisecond.
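
A small sketch of that suggestion, with the backoff math pulled into a helper so the constant is defined once; the names initialBackoffMillis and retryBackoff are illustrative:

const initialBackoffMillis = 100

// retryBackoff returns the exponential backoff for a retry attempt:
// 100ms, 200ms, 400ms for attempts 1, 2, 3.
func retryBackoff(retryCount int) time.Duration {
    return time.Duration(initialBackoffMillis<<uint(retryCount-1)) * time.Millisecond
}

The Nacked case would then read backoff := retryBackoff(retryCount).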

s.logger.Trace(fmt.Sprintf("Nack received, retrying after %s (%d/%d)", backoff, retryCount, maxRetries), logFields)
select {
case <-time.After(backoff):
continue SendToSubscriber
case <-s.closing:
s.logger.Trace("Closing during backoff, message discarded", logFields)
return
}
case <-s.closing:
s.logger.Trace("Closing, message discarded", logFields)
return
36 changes: 19 additions & 17 deletions daemon/git.go
@@ -1,9 +1,14 @@
package daemon

import (
"github.com/go-git/go-git/v5"
"context"
"os/exec"
"strings"
"time"
)

const gitCmdTimeout = 2 * time.Second

// GitInfo contains git repository information
type GitInfo struct {
Branch string
@@ -12,34 +17,31 @@ type GitInfo struct {
}

// GetGitInfo returns git branch and dirty status for a directory.
// It walks up the directory tree to find the git repository root.
// It uses the native git CLI which is significantly faster and more memory-efficient
// than the pure-Go go-git implementation, especially on large repositories.
func GetGitInfo(workingDir string) GitInfo {
if workingDir == "" {
return GitInfo{}
}

repo, err := git.PlainOpenWithOptions(workingDir, &git.PlainOpenOptions{
DetectDotGit: true, // Walk up to find .git
})
if err != nil {
return GitInfo{} // Not a git repo
ctx, cancel := context.WithTimeout(context.Background(), gitCmdTimeout)
defer cancel()

// Check if this is a git repo
if err := exec.CommandContext(ctx, "git", "-C", workingDir, "rev-parse", "--git-dir").Run(); err != nil {
return GitInfo{}
}

info := GitInfo{IsRepo: true}

// Get branch name from HEAD
head, err := repo.Head()
if err == nil {
info.Branch = head.Name().Short()
if out, err := exec.CommandContext(ctx, "git", "-C", workingDir, "rev-parse", "--abbrev-ref", "HEAD").Output(); err == nil {
info.Branch = strings.TrimSpace(string(out))
}

// Check dirty status by examining worktree status
worktree, err := repo.Worktree()
if err == nil {
status, err := worktree.Status()
if err == nil {
info.Dirty = !status.IsClean()
}
// Check dirty status via porcelain output (empty = clean)
if out, err := exec.CommandContext(ctx, "git", "-C", workingDir, "status", "--porcelain").Output(); err == nil {
info.Dirty = len(strings.TrimSpace(string(out))) > 0
}

return info
17 changes: 9 additions & 8 deletions daemon/socket.go
@@ -127,21 +127,22 @@ func (p *SocketHandler) Stop() {

func (p *SocketHandler) acceptConnections() {
for {
select {
case <-p.stopChan:
return
default:
conn, err := p.listener.Accept()
if err != nil {
continue
conn, err := p.listener.Accept()
if err != nil {
select {
case <-p.stopChan:
return
default:
}
go p.handleConnection(conn)
continue
Comment on lines +131 to +137 (Contributor, high):
If p.listener.Accept() returns an error that is not due to the listener being closed (e.g., a temporary system error like running out of file descriptors), this loop could become a busy-loop, consuming high CPU. To make this more robust, consider handling specific errors. You could check if the error is net.ErrClosed to return from the function, and for other errors, log them and perhaps add a small delay to prevent spinning.
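
A sketch of that suggestion, assuming the SocketHandler fields shown in this diff (listener, stopChan, handleConnection) plus imports of errors, net, and time; the 100ms pause is an illustrative value, not something prescribed by the PR:

func (p *SocketHandler) acceptConnections() {
    for {
        conn, err := p.listener.Accept()
        if err != nil {
            // Listener closed during shutdown: stop accepting.
            if errors.Is(err, net.ErrClosed) {
                return
            }
            select {
            case <-p.stopChan:
                return
            default:
            }
            // Temporary failure (e.g. out of file descriptors): back off briefly
            // instead of busy-looping; logging could be added here as well.
            time.Sleep(100 * time.Millisecond)
            continue
        }
        go p.handleConnection(conn)
    }
}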

}
go p.handleConnection(conn)
}
}

func (p *SocketHandler) handleConnection(conn net.Conn) {
defer conn.Close()
conn.SetDeadline(time.Now().Add(5 * time.Second))
Contributor (medium):
The 5-second connection deadline is a magic number. It would be better to define this as a named constant at the package level, like const connectionDeadline = 5 * time.Second, to improve readability and make it easier to configure in the future.
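
A minimal sketch of the suggested constant applied to this handler; only the deadline line changes, and the rest of handleConnection is elided:

// Package-level constant instead of an inline magic number.
const connectionDeadline = 5 * time.Second

func (p *SocketHandler) handleConnection(conn net.Conn) {
    defer conn.Close()
    conn.SetDeadline(time.Now().Add(connectionDeadline))
    // ... decode the incoming SocketMessage as before ...
}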

decoder := json.NewDecoder(conn)
var msg SocketMessage
if err := decoder.Decode(&msg); err != nil {