5 changes: 4 additions & 1 deletion go.mod
@@ -2,6 +2,9 @@ module github.com/FollowTheProcess/gowc

 go 1.22

-require github.com/FollowTheProcess/test v0.13.0
+require (
+	github.com/FollowTheProcess/cli v0.1.0
+	github.com/FollowTheProcess/test v0.13.0
+)

 require github.com/google/go-cmp v0.6.0 // indirect
2 changes: 2 additions & 0 deletions go.sum
@@ -1,3 +1,5 @@
+github.com/FollowTheProcess/cli v0.1.0 h1:1IbswvnwbF8rGjU9ByeKKHnLiwfoCVW2zwIKMCoOFWk=
+github.com/FollowTheProcess/cli v0.1.0/go.mod h1:WpuVeGASGKCIEuFSlBSSOq3c+aw77xd1dVfVoXMSfcI=
 github.com/FollowTheProcess/test v0.13.0 h1:u0TP9jIM2Pho3CtwjyLptr1M8ScEDbfZzc88DMT7eg0=
 github.com/FollowTheProcess/test v0.13.0/go.mod h1:oIqlUoS8wKFmKBFBMJH/+asP7lQXE2D3YFhmUEubTUw=
 github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
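For reference, these pins are what a plain `go get github.com/FollowTheProcess/cli@v0.1.0` would record: the requirement in go.mod, plus the module hash and go.mod hash in go.sum.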
99 changes: 50 additions & 49 deletions internal/count/count.go
@@ -13,39 +13,40 @@ import (
 	"text/tabwriter"
 )

-// TabWriter options.
+// TableWriter config.
 const (
-	minWidth = 0
-	tabWidth = 8
-	padding  = 1
-	padChar  = '\t'
+	minWidth = 1   // Min cell width
+	tabWidth = 8   // Tab width in spaces
+	padding  = 2   // Padding
+	padChar  = ' ' // Char to pad with
+	flags    = 0   // Config flags
 )

 type (
-	LineCounter uint64
-	ByteCounter uint64
-	WordCounter uint64
-	CharCounter uint64
+	Lines uint64
+	Bytes uint64
+	Words uint64
+	Chars uint64
 )

 // Result encodes the result of a counting operation on a file.
 type Result struct {
-	Name  string      `json:"name"`
-	Lines LineCounter `json:"lines"`
-	Bytes ByteCounter `json:"bytes"`
-	Words WordCounter `json:"words"`
-	Chars CharCounter `json:"chars"`
+	Name  string `json:"name"`
+	Lines Lines  `json:"lines"`
+	Bytes Bytes  `json:"bytes"`
+	Words Words  `json:"words"`
+	Chars Chars  `json:"chars"`
 }

 // Display outputs the Count as a pretty table to w.
-func (r Result) Display(w io.Writer, jsonFlag bool) error {
-	if jsonFlag {
+func (r Result) Display(w io.Writer, toJSON bool) error {
+	if toJSON {
 		if err := json.NewEncoder(w).Encode(r); err != nil {
 			return fmt.Errorf("failed to serialise JSON: %w", err)
 		}
 		return nil
 	}
-	tab := tabwriter.NewWriter(w, minWidth, tabWidth, padding, padChar, tabwriter.DiscardEmptyColumns|tabwriter.AlignRight)
+	tab := tabwriter.NewWriter(w, minWidth, tabWidth, padding, padChar, flags)
 	fmt.Fprintln(tab, "File\tBytes\tChars\tLines\tWords")
 	fmt.Fprintf(tab, "%s\t%d\t%d\t%d\t%d\n", r.Name, r.Bytes, r.Chars, r.Lines, r.Words)
 	return tab.Flush()
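A side note on the tabwriter change: dropping `tabwriter.DiscardEmptyColumns|tabwriter.AlignRight` in favour of `flags = 0` switches the table from right-aligned, tab-padded cells to left-aligned, space-padded ones. A minimal standalone sketch of the new settings (the file name and counts are made-up sample data):

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	// Same values as the new constants in this diff:
	// minWidth=1, tabWidth=8, padding=2, padChar=' ', flags=0.
	tab := tabwriter.NewWriter(os.Stdout, 1, 8, 2, ' ', 0)
	fmt.Fprintln(tab, "File\tBytes\tChars\tLines\tWords")
	fmt.Fprintf(tab, "%s\t%d\t%d\t%d\t%d\n", "moby_dick.txt", 1232922, 1232922, 22314, 215864)
	tab.Flush() // flush renders the columns: left-aligned, padded with spaces
}
```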
@@ -55,32 +56,30 @@ func (r Result) Display(w io.Writer, jsonFlag bool) error {
 type Results []Result

 // Display outputs the Results to w.
-func (r Results) Display(w io.Writer, jsonFlag bool) error {
-	if jsonFlag {
+func (r Results) Display(w io.Writer, toJSON bool) error {
+	if toJSON {
 		if err := json.NewEncoder(w).Encode(r); err != nil {
 			return fmt.Errorf("failed to serialise JSON: %w", err)
 		}
 		return nil
 	}
-	tab := tabwriter.NewWriter(w, minWidth, tabWidth, padding, padChar, tabwriter.DiscardEmptyColumns|tabwriter.AlignRight)
+	tab := tabwriter.NewWriter(w, minWidth, tabWidth, padding, padChar, flags)
 	fmt.Fprintln(tab, "File\tBytes\tChars\tLines\tWords")
-	for _, res := range r {
-		fmt.Fprintf(tab, "%s\t%d\t%d\t%d\t%d\n", res.Name, res.Bytes, res.Chars, res.Lines, res.Words)
+	for _, result := range r {
+		fmt.Fprintf(tab, "%s\t%d\t%d\t%d\t%d\n", result.Name, result.Bytes, result.Chars, result.Lines, result.Words)
 	}
 	return tab.Flush()
 }

 // One performs a counting operation on a single reader, returning the result and any error.
 func One(in io.Reader, name string) (Result, error) {
 	var (
-		lc LineCounter
-		bc ByteCounter
-		wc WordCounter
-		cc CharCounter
+		lc Lines
+		bc Bytes
+		wc Words
+		cc Chars
 	)

-	// TODO: This currently copies to each writer one at a time when there's really no need
-	// they are all separate so could be parallelised
 	multi := io.MultiWriter(&lc, &bc, &wc, &cc)

 	_, err := io.Copy(multi, in)
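This is the heart of One: each counter is an io.Writer that stores nothing and only tallies, so io.MultiWriter fans a single read pass out to all four at once. A self-contained sketch of the same pattern with two toy counters (local stand-ins, not the package's types):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

// Toy stand-ins for the counters in this diff.
type lineCount uint64
type byteCount uint64

func (l *lineCount) Write(p []byte) (int, error) {
	for _, b := range p {
		if b == '\n' {
			*l++
		}
	}
	return len(p), nil
}

func (b *byteCount) Write(p []byte) (int, error) {
	*b += byteCount(len(p))
	return len(p), nil
}

func main() {
	var lc lineCount
	var bc byteCount

	// One io.Copy drives every counter: MultiWriter duplicates
	// each chunk to all of its writers in turn.
	multi := io.MultiWriter(&lc, &bc)
	if _, err := io.Copy(multi, strings.NewReader("one\ntwo\nthree\n")); err != nil {
		panic(err)
	}
	fmt.Println(lc, bc) // 3 14
}
```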
@@ -106,7 +105,7 @@ func All(files []string) (Results, error) {
 	// Keep a waitgroup so we know when all the workers are done
 	var wg sync.WaitGroup

-	// Launch a concurrent worker pool to chew through the queue of files to hash
+	// Launch a concurrent worker pool to chew through the queue of files to count
 	// these will all initially block as no files are on the jobs channel yet
 	// nWorkers is min of NumCPU and len(files) so we don't start more workers than
 	// is necessary (no point kicking off 8 workers to do 3 files for example)
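The comment describes a classic bounded worker pool. A stripped-down sketch of that shape, with hypothetical string jobs standing in for the file counting (the PR's actual All body is largely hidden by this hunk):

```go
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func main() {
	files := []string{"a.txt", "b.txt", "c.txt"} // hypothetical inputs

	// Don't start more workers than there are files.
	nWorkers := runtime.NumCPU()
	if len(files) < nWorkers {
		nWorkers = len(files)
	}

	jobs := make(chan string)
	results := make(chan string)

	var wg sync.WaitGroup
	for i := 0; i < nWorkers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for file := range jobs {
				results <- "counted " + file // real work goes here
			}
		}()
	}

	// Feed the queue, then close it so workers can exit their range loops.
	go func() {
		for _, f := range files {
			jobs <- f
		}
		close(jobs)
	}()

	// Close results once every worker is done.
	go func() {
		wg.Wait()
		close(results)
	}()

	for r := range results {
		fmt.Println(r)
	}
}
```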
@@ -150,17 +149,19 @@
 func worker(counts chan<- Result, files <-chan string, wg *sync.WaitGroup) {
 	defer wg.Done()
 	for file := range files {
-		f, err := os.Open(file)
+		info, err := os.Stat(file)
 		if err != nil {
-			// If we can't open the (possibly dir) path
-			// then just close the file and continue
-			f.Close()
-			continue
+			panic(err) // TODO: Not this
 		}
-		info, _ := f.Stat() //nolint: errcheck // The file is already open here so we can ignore the error
+		// Skip directories
 		if info.IsDir() {
-			// Same as above, ensure we close the file before carrying on
-			f.Close()
 			continue
 		}

+		f, err := os.Open(file)
+		if err != nil {
+			// If we can't open the file, just close it and move on
+			// TODO: Handle this better
+			f.Close()
+			continue
+		}
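Two small observations on this hunk. The panic is flagged by the author's own TODO; one natural direction (my assumption, not something this PR does) is to treat an unstattable path like an unopenable file and skip it. Also, the f.Close() in the open-failure branch is harmless but redundant: on a failed os.Open, f is nil, and (*os.File).Close on a nil receiver just returns os.ErrInvalid. A hypothetical sketch of the skip-instead-of-panic approach:

```go
package main

import (
	"fmt"
	"os"
)

// countable reports whether a worker should try to count path.
// A hypothetical helper, not something this PR adds.
func countable(path string) bool {
	info, err := os.Stat(path)
	if err != nil {
		return false // can't stat it (bad path, permissions): skip, don't panic
	}
	return !info.IsDir() // directories are skipped too
}

func main() {
	fmt.Println(countable("go.mod"), countable("no/such/file"))
}
```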
@@ -175,12 +176,12 @@ func worker(counts chan<- Result, files <-chan string, wg *sync.WaitGroup) {
 	}
 }

-// Write implements [io.Writer] for LineCounter.
+// Write implements [io.Writer] for Lines.
 //
 // It doesn't actually write anything, but increments the line count
-// on every newline in data, allowing a LineCounter to be used as dst
+// on every newline in data, allowing a Lines to be used as dst
 // in a call to [io.Copy].
-func (l *LineCounter) Write(data []byte) (int, error) {
+func (l *Lines) Write(data []byte) (int, error) {
 	for _, byt := range data {
 		if byt == '\n' {
 			*l++
@@ -189,24 +190,24 @@ func (l *LineCounter) Write(data []byte) (int, error) {
 	return len(data), nil
 }

-// Write implements [io.Writer] for ByteCounter.
+// Write implements [io.Writer] for Bytes.
 //
 // It doesn't actually write anything, but increments the byte count
-// on every byte in data, allowing a ByteCounter to be used as dst
+// on every byte in data, allowing a Bytes to be used as dst
 // in a call to [io.Copy].
-func (b *ByteCounter) Write(data []byte) (int, error) {
+func (b *Bytes) Write(data []byte) (int, error) {
 	for range data {
 		*b++
 	}
 	return len(data), nil
 }

-// Write implements [io.Writer] for WordCounter.
+// Write implements [io.Writer] for Words.
 //
 // It doesn't actually write anything, but increments the word count
-// on every word in data, allowing a WordCounter to be used as dst
+// on every word in data, allowing a Words to be used as dst
 // in a call to [io.Copy].
-func (w *WordCounter) Write(data []byte) (int, error) {
+func (w *Words) Write(data []byte) (int, error) {
 	scanner := bufio.NewScanner(bytes.NewReader(data))
 	scanner.Split(bufio.ScanWords)
 	for scanner.Scan() {
@@ -215,12 +216,12 @@ func (w *WordCounter) Write(data []byte) (int, error) {
 	return len(data), nil
 }

-// Write implements [io.Writer] for CharCounter.
+// Write implements [io.Writer] for Chars.
 //
 // It doesn't actually write anything, but increments the char count
-// on every utf8 rune in data, allowing a CharCounter to be used as dst
+// on every utf8 rune in data, allowing a Chars to be used as dst
 // in a call to [io.Copy].
-func (c *CharCounter) Write(data []byte) (int, error) {
+func (c *Chars) Write(data []byte) (int, error) {
 	scanner := bufio.NewScanner(bytes.NewReader(data))
 	scanner.Split(bufio.ScanRunes)
 	for scanner.Scan() {
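One behavioural caveat that applies to both Words and Chars: each call to Write scans its chunk independently, so a word (or multi-byte rune) that straddles two chunks of an io.Copy gets counted twice. A sketch demonstrating the word case, with the Write method copied from this diff:

```go
package main

import (
	"bufio"
	"bytes"
	"fmt"
)

// Words as defined in this diff, copied locally for the demo.
type Words uint64

func (w *Words) Write(data []byte) (int, error) {
	scanner := bufio.NewScanner(bytes.NewReader(data))
	scanner.Split(bufio.ScanWords)
	for scanner.Scan() {
		*w++
	}
	return len(data), nil
}

func main() {
	var whole Words
	whole.Write([]byte("hello world")) // one chunk: two words
	fmt.Println(whole)                 // 2

	var split Words
	split.Write([]byte("hel"))      // "hel" looks like a whole word here
	split.Write([]byte("lo world")) // ...and "lo" like another
	fmt.Println(split)              // 3, not 2: chunk boundaries matter
}
```

With io.Copy's default 32 KiB buffer this only affects tokens that happen to straddle a buffer boundary, which is presumably why it is tolerated here.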