
add logs command to turbo CLI #827

Closed

wants to merge 42 commits into from

42 commits
794f231
feat: add log of previous run's task hashes
chelkyl Mar 7, 2022
7a41d35
feat: add logs command to review log replays
chelkyl Mar 7, 2022
b245631
Merge branch 'main' into add-logs-command
Mar 7, 2022
0286cd6
Merge branch 'vercel:main' into add-logs-command
chelkyl Mar 10, 2022
88f42d3
Merge branch 'main' of github.com:vercel/turborepo into add-logs-command
chelkyl Mar 13, 2022
2c92ee9
refactor: match refactor in run command
chelkyl Mar 13, 2022
52e61af
feat: add info logging with color
chelkyl Mar 13, 2022
aea2fd5
feat: suggest --all flag if no last run log
chelkyl Mar 13, 2022
4038ba2
fix: specify which hash did not have replay logs
chelkyl Mar 13, 2022
99bfbc9
docs: update --progress to --output-logs flag
chelkyl Mar 13, 2022
48a9318
fix: use more loose glob for replay logs
chelkyl Mar 13, 2022
0618899
refactor: sort mode strings should be enums
chelkyl Mar 16, 2022
a4ced7f
feat: add time sorting
chelkyl Mar 16, 2022
a783f00
feat: store start time in metadata file
chelkyl Mar 19, 2022
9802b2b
refactor: sort by task start and end, not fs time
chelkyl Mar 19, 2022
1a8813a
feat: remove unnecessary len checks
chelkyl Mar 20, 2022
3152c6d
refactor: use switch for readability
chelkyl Mar 20, 2022
d450cbc
refactor: rename and add more metadata points
chelkyl Mar 20, 2022
7e99803
feat: allow pass metadata opts as csv
chelkyl Mar 20, 2022
f9a722d
docs: sync with logs command help
chelkyl Mar 20, 2022
003ed38
Merge branch 'main' of github.com:vercel/turborepo into add-logs-command
chelkyl Mar 20, 2022
1f422d4
Merge branch 'main' of github.com:vercel/turborepo into add-logs-command
chelkyl Apr 1, 2022
353ee0d
Merge branch 'main' into add-logs-command
jaredpalmer Apr 5, 2022
5a0c876
Merge branch 'main' of github.com:vercel/turborepo into add-logs-command
chelkyl Apr 24, 2022
173ca8f
fix: sync with run.go
chelkyl Apr 24, 2022
ba95f0c
refactor: use SortMode and run.go LogsMode enums
chelkyl Apr 24, 2022
bafbeef
fix: only print metadata once per hash not per log
chelkyl Apr 24, 2022
23bb051
feat: sync with run.go
chelkyl Apr 24, 2022
56b8bf1
fix: HasPrefix check must match slice
chelkyl Apr 24, 2022
7a22853
Revert "feat: sync with run.go
chelkyl Apr 24, 2022
fda4caa
feat: output log line should reflect behavior
chelkyl Apr 24, 2022
7c35c2f
feat: add all metadata option
chelkyl Apr 24, 2022
dea58d5
feat: warn if err on remove last run log
chelkyl Apr 24, 2022
b4f3cf7
feat: warn if err on updating last run log
chelkyl Apr 24, 2022
9f6e1dc
fix: prefer all caps abbrevs
chelkyl Apr 24, 2022
7284b9f
fix: prefer fmt.Errorf
chelkyl Apr 24, 2022
3d887be
refactor: unexport sortMode and metadataName
chelkyl Apr 24, 2022
f25f42d
lint: ignore errcheck where nothing to do
chelkyl Apr 24, 2022
3d6c7aa
lint: ignore ALL_CAPS, add as todo
chelkyl Apr 24, 2022
f6a039e
refactor: unexport sortMode and metadataName enums
chelkyl Apr 24, 2022
223588b
lint: missed ignore type
chelkyl Apr 24, 2022
46c3efa
Merge branch 'main' of github.com:vercel/turborepo into add-logs-command
chelkyl Apr 25, 2022
3 changes: 3 additions & 0 deletions cli/cmd/turbo/main.go
@@ -79,6 +79,9 @@ func main() {
return &run.RunCommand{Config: cf, Ui: ui, Processes: processes},
nil
},
"logs": func() (cli.Command, error) {
return &run.LogsCommand{Config: cf, UI: ui}, nil
},
"prune": func() (cli.Command, error) {
return &prune.PruneCommand{Config: cf, Ui: ui}, nil
},
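
The hunk above registers the new subcommand in the CLI's factory map. turbo's CLI is built on github.com/mitchellh/cli, so LogsCommand has to satisfy that package's three-method Command interface. A minimal sketch of such an implementation follows; the field names mirror the diff, but the UI field type, method bodies, and help strings are illustrative assumptions rather than the PR's actual code.

package run

import (
	"github.com/mitchellh/cli"

	"github.com/vercel/turborepo/cli/internal/config"
)

// LogsCommand replays cached task logs. The struct fields mirror the
// factory call in main.go; the UI field type is an assumption.
type LogsCommand struct {
	Config *config.Config
	UI     cli.Ui
}

// Synopsis is the one-line description shown in `turbo --help`.
func (c *LogsCommand) Synopsis() string {
	return "Review logs replayed from previous runs"
}

// Help is shown for `turbo logs --help`; the text here is illustrative.
func (c *LogsCommand) Help() string {
	return "Usage: turbo logs [options] [<task-hash>...]"
}

// Run executes the command; a zero return code signals success.
func (c *LogsCommand) Run(args []string) int {
	// The real implementation locates replay logs in the cache
	// directory, sorts them, and prints them through c.UI.
	return 0
}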
9 changes: 6 additions & 3 deletions cli/internal/cache/async_cache.go
@@ -5,6 +5,7 @@ package cache

import (
"sync"
"time"

"github.com/vercel/turborepo/cli/internal/config"
)
@@ -24,6 +25,7 @@ type asyncCache struct {
type cacheRequest struct {
target string
key string
start time.Time
duration int
files []string
}
@@ -40,17 +42,18 @@ func newAsyncCache(realCache Cache, config *config.Config) Cache {
return c
}

func (c *asyncCache) Put(target string, key string, duration int, files []string) error {
func (c *asyncCache) Put(target string, key string, start time.Time, duration int, files []string) error {
c.requests <- cacheRequest{
target: target,
key: key,
files: files,
duration: duration,
start: start,
}
return nil
}

func (c *asyncCache) Fetch(target string, key string, files []string) (bool, []string, int, error) {
func (c *asyncCache) Fetch(target string, key string, files []string) (bool, []string, time.Time, int, error) {
return c.realCache.Fetch(target, key, files)
}

@@ -72,7 +75,7 @@ func (c *asyncCache) Shutdown() {
// run implements the actual async logic.
func (c *asyncCache) run() {
for r := range c.requests {
c.realCache.Put(r.target, r.key, r.duration, r.files)
c.realCache.Put(r.target, r.key, r.start, r.duration, r.files) //nolint:golint,errcheck // nothing to do
}
c.wg.Done()
}
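
The asyncCache shown above is a write-behind queue: Put only enqueues a cacheRequest (now carrying the new start field) on a buffered channel, and worker goroutines drain the channel into the real cache, so callers never block on storage. A self-contained sketch of the same pattern, with assumed names throughout:

package main

import (
	"fmt"
	"sync"
	"time"
)

// writeBehind mimics asyncCache: writes are queued on a channel and
// flushed by a worker goroutine, so callers never block on slow storage.
type writeBehind struct {
	requests chan string
	wg       sync.WaitGroup
}

func newWriteBehind() *writeBehind {
	w := &writeBehind{requests: make(chan string, 8)}
	w.wg.Add(1)
	go w.run()
	return w
}

// Put enqueues and returns immediately, like asyncCache.Put.
func (w *writeBehind) Put(key string) {
	w.requests <- key
}

// run drains the queue; the Sleep stands in for a slow real cache write.
func (w *writeBehind) run() {
	for key := range w.requests {
		time.Sleep(10 * time.Millisecond)
		fmt.Println("stored", key)
	}
	w.wg.Done()
}

// Shutdown closes the queue and waits for the worker to finish,
// like asyncCache.Shutdown.
func (w *writeBehind) Shutdown() {
	close(w.requests)
	w.wg.Wait()
}

func main() {
	w := newWriteBehind()
	w.Put("hash-1")
	w.Put("hash-2")
	w.Shutdown()
}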
35 changes: 20 additions & 15 deletions cli/internal/cache/cache.go
@@ -7,6 +7,7 @@ package cache

import (
"fmt"
"time"

"github.com/vercel/turborepo/cli/internal/analytics"
"github.com/vercel/turborepo/cli/internal/config"
@@ -18,9 +19,9 @@ import (
type Cache interface {
// Fetch returns true if there is a cache hit. It is expected to move files
// into their correct position as a side effect
Fetch(target string, hash string, files []string) (bool, []string, int, error)
Fetch(target string, hash string, files []string) (bool, []string, time.Time, int, error)
// Put caches files for a given hash
Put(target string, hash string, duration int, files []string) error
Put(target string, hash string, start time.Time, duration int, files []string) error
Clean(target string)
CleanAll()
Shutdown()
@@ -29,11 +30,15 @@ type Cache interface {
const cacheEventHit = "HIT"
const cacheEventMiss = "MISS"

// notime is the time used to represent invalid or undefined time
var notime = time.Date(0, time.January, 0, 0, 0, 0, 0, time.UTC)

type CacheEvent struct {
Source string `mapstructure:"source"`
Event string `mapstructure:"event"`
Hash string `mapstructure:"hash"`
Duration int `mapstructure:"duration"`
Source string `mapstructure:"source"`
Event string `mapstructure:"event"`
Hash string `mapstructure:"hash"`
Start time.Time `mapstructure:"start"`
Duration int `mapstructure:"duration"`
}

// New creates a new cache
@@ -68,16 +73,16 @@ type cacheMultiplexer struct {
caches []Cache
}

func (mplex cacheMultiplexer) Put(target string, key string, duration int, files []string) error {
return mplex.storeUntil(target, key, duration, files, len(mplex.caches))
func (mplex cacheMultiplexer) Put(target string, key string, start time.Time, duration int, files []string) error {
return mplex.storeUntil(target, key, start, duration, files, len(mplex.caches))
}

// storeUntil stores artifacts into higher priority caches than the given one.
// Used after artifact retrieval to ensure we have them in eg. the directory cache after
// downloading from the RPC cache.
// This is a little inefficient since we could write the file to plz-out then copy it to the dir cache,
// but it's hard to fix that without breaking the cache abstraction.
func (mplex cacheMultiplexer) storeUntil(target string, key string, duration int, outputGlobs []string, stopAt int) error {
func (mplex cacheMultiplexer) storeUntil(target string, key string, start time.Time, duration int, outputGlobs []string, stopAt int) error {
// Attempt to store on all caches simultaneously.
g := &errgroup.Group{}
for i, cache := range mplex.caches {
@@ -86,7 +91,7 @@ func (mplex cacheMultiplexer) storeUntil(target string, key string, duration int
}
c := cache
g.Go(func() error {
return c.Put(target, key, duration, outputGlobs)
return c.Put(target, key, start, duration, outputGlobs)
})
}

@@ -97,19 +102,19 @@ func (mplex cacheMultiplexer) storeUntil(target string, key string, duration int
return nil
}

func (mplex cacheMultiplexer) Fetch(target string, key string, files []string) (bool, []string, int, error) {
func (mplex cacheMultiplexer) Fetch(target string, key string, files []string) (bool, []string, time.Time, int, error) {
// Retrieve from caches sequentially; if we did them simultaneously we could
// easily write the same file from two goroutines at once.
for i, cache := range mplex.caches {
if ok, actualFiles, duration, err := cache.Fetch(target, key, files); ok {
if ok, actualFiles, start, duration, err := cache.Fetch(target, key, files); ok {
// Store this into other caches. We can ignore errors here because we know
// we have previously successfully stored in a higher-priority cache, and so the overall
// result is a success at fetching. Storing in lower-priority caches is an optimization.
mplex.storeUntil(target, key, duration, actualFiles, i)
return ok, actualFiles, duration, err
mplex.storeUntil(target, key, start, duration, actualFiles, i) //nolint:golint,errcheck // nothing to do
return ok, actualFiles, start, duration, err
}
}
return false, files, 0, nil
return false, files, notime, 0, nil
}

func (mplex cacheMultiplexer) Clean(target string) {
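
storeUntil is what makes the multiplexer self-healing: on a Fetch hit at index i, only the caches ahead of i (the higher-priority ones that just missed) are backfilled. A toy sketch of that control flow, using plain maps instead of turbo's Cache interface:

package main

import "fmt"

// store is a stand-in for one cache in the multiplexer's priority list.
type store map[string]string

// fetchAndBackfill mirrors cacheMultiplexer.Fetch: scan caches in order,
// and on a hit at index i, copy the value into every cache before i.
func fetchAndBackfill(caches []store, key string) (string, bool) {
	for i, c := range caches {
		if v, ok := c[key]; ok {
			for j := 0; j < i; j++ { // storeUntil(..., stopAt=i)
				caches[j][key] = v
			}
			return v, true
		}
	}
	return "", false
}

func main() {
	local := store{}                         // misses
	remote := store{"abc123": "task output"} // hits
	v, ok := fetchAndBackfill([]store{local, remote}, "abc123")
	fmt.Println(v, ok)
	fmt.Println(local["abc123"]) // the local cache is now warm
}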
61 changes: 50 additions & 11 deletions cli/internal/cache/cache_fs.go
@@ -4,12 +4,14 @@
package cache

import (
"bufio"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"time"

"github.com/vercel/turborepo/cli/internal/analytics"
"github.com/vercel/turborepo/cli/internal/config"
@@ -29,31 +31,31 @@ func newFsCache(config *config.Config, recorder analytics.Recorder) Cache {
}

// Fetch returns true if items are cached. It moves them into position as a side effect.
func (f *fsCache) Fetch(target, hash string, _unusedOutputGlobs []string) (bool, []string, int, error) {
func (f *fsCache) Fetch(target, hash string, _unusedOutputGlobs []string) (bool, []string, time.Time, int, error) {
cachedFolder := filepath.Join(f.cacheDirectory, hash)

// If it's not in the cache bail now
if !fs.PathExists(cachedFolder) {
f.logFetch(false, hash, 0)
return false, nil, 0, nil
f.logFetch(false, hash, notime, 0)
return false, nil, notime, 0, nil
}

// Otherwise, copy it into position
err := fs.RecursiveCopyOrLinkFile(cachedFolder, target, fs.DirPermissions, true, true)
if err != nil {
// TODO: what event to log here?
return false, nil, 0, fmt.Errorf("error moving artifact from cache into %v: %w", target, err)
return false, nil, notime, 0, fmt.Errorf("error moving artifact from cache into %v: %w", target, err)
}

meta, err := ReadCacheMetaFile(filepath.Join(f.cacheDirectory, hash+"-meta.json"))
if err != nil {
return false, nil, 0, fmt.Errorf("error reading cache metadata: %w", err)
return false, nil, notime, 0, fmt.Errorf("error reading cache metadata: %w", err)
}
f.logFetch(true, hash, meta.Duration)
return true, nil, meta.Duration, nil
f.logFetch(true, hash, meta.Start, meta.Duration)
return true, nil, meta.Start, meta.Duration, nil
}

func (f *fsCache) logFetch(hit bool, hash string, duration int) {
func (f *fsCache) logFetch(hit bool, hash string, start time.Time, duration int) {
var event string
if hit {
event = cacheEventHit
@@ -65,11 +67,12 @@ func (f *fsCache) logFetch(hit bool, hash string, duration int) {
Event: event,
Hash: hash,
Duration: duration,
Start: start,
}
f.recorder.LogEvent(payload)
}

func (f *fsCache) Put(target, hash string, duration int, files []string) error {
func (f *fsCache) Put(target, hash string, start time.Time, duration int, files []string) error {
g := new(errgroup.Group)

numDigesters := runtime.NumCPU()
@@ -108,6 +111,7 @@ func (f *fsCache) Put(target, hash string, duration int, files []string) error {
WriteCacheMetaFile(filepath.Join(f.cacheDirectory, hash+"-meta.json"), &CacheMetadata{
Duration: duration,
Hash: hash,
Start: start,
})

return nil
@@ -126,8 +130,9 @@ func (cache *fsCache) Shutdown() {}
// CacheMetadata stores duration and hash information for a cache entry so that aggregate Time Saved calculations
// can be made from artifacts from various caches
type CacheMetadata struct {
Hash string `json:"hash"`
Duration int `json:"duration"`
Hash string `json:"hash"`
Duration int `json:"duration"`
Start time.Time `json:"start"`
}

// WriteCacheMetaFile writes cache metadata file at a path
@@ -156,3 +161,37 @@ func ReadCacheMetaFile(path string) (*CacheMetadata, error) {
}
return &config, nil
}
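
Given the struct tags above, the <hash>-meta.json sidecar written by WriteCacheMetaFile now gains a start field. A small sketch of the resulting JSON shape (the hash and times below are made up):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// CacheMetadata is copied from the struct above; the values below are
// invented to show the serialized shape only.
type CacheMetadata struct {
	Hash     string    `json:"hash"`
	Duration int       `json:"duration"`
	Start    time.Time `json:"start"`
}

func main() {
	b, _ := json.MarshalIndent(CacheMetadata{
		Hash:     "abc123",
		Duration: 1500,
		Start:    time.Date(2022, time.March, 19, 12, 0, 0, 0, time.UTC),
	}, "", "  ")
	fmt.Println(string(b))
	// {
	//   "hash": "abc123",
	//   "duration": 1500,
	//   "start": "2022-03-19T12:00:00Z"
	// }
}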

// AppendHashesFile adds a hash to a file at path
// Note: naively assuming locks are not needed
func AppendHashesFile(path string, hash string) error {
file, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return err
}

defer file.Close() //nolint:golint,errcheck // nothing to do

if _, err = file.WriteString(hash + "\n"); err != nil {
return err
}

return nil
}

// ReadHashesFile reads hashes stored line by line from a file at path
func ReadHashesFile(path string) ([]string, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}

defer file.Close() //nolint:golint,errcheck // nothing to do

var hashes []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
hashes = append(hashes, scanner.Text())
}
return hashes, scanner.Err()
}
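
A short usage sketch for the two new helpers, assuming a caller inside the turborepo module (the cache package is internal) and a hypothetical file path:

package main

import (
	"fmt"
	"log"

	"github.com/vercel/turborepo/cli/internal/cache"
)

func main() {
	path := ".turbo/last-run-hashes" // hypothetical location
	if err := cache.AppendHashesFile(path, "abc123"); err != nil {
		log.Fatal(err)
	}
	hashes, err := cache.ReadHashesFile(path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hashes) // e.g. [abc123]
}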
14 changes: 8 additions & 6 deletions cli/internal/cache/cache_http.go
@@ -53,7 +53,8 @@ var mtime = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC)
// nobody is the usual uid / gid of the 'nobody' user.
const nobody = 65534

func (cache *httpCache) Put(target, hash string, duration int, files []string) error {
// start is ignored
func (cache *httpCache) Put(target, hash string, start time.Time, duration int, files []string) error {
// if cache.writable {
cache.requestLimiter.acquire()
defer cache.requestLimiter.release()
@@ -135,19 +136,19 @@ func (cache *httpCache) storeFile(tw *tar.Writer, repoRelativePath string) error
return err
}

func (cache *httpCache) Fetch(target, key string, _unusedOutputGlobs []string) (bool, []string, int, error) {
func (cache *httpCache) Fetch(target, key string, _unusedOutputGlobs []string) (bool, []string, time.Time, int, error) {
cache.requestLimiter.acquire()
defer cache.requestLimiter.release()
hit, files, duration, err := cache.retrieve(key)
if err != nil {
// TODO: analytics event?
return false, files, duration, fmt.Errorf("failed to retrieve files from HTTP cache: %w", err)
return false, files, notime, duration, fmt.Errorf("failed to retrieve files from HTTP cache: %w", err)
}
cache.logFetch(hit, key, duration)
return hit, files, duration, err
cache.logFetch(hit, key, notime, duration)
return hit, files, notime, duration, err
}

func (cache *httpCache) logFetch(hit bool, hash string, duration int) {
func (cache *httpCache) logFetch(hit bool, hash string, start time.Time, duration int) {
var event string
if hit {
event = cacheEventHit
@@ -159,6 +160,7 @@ func (cache *httpCache) logFetch(hit bool, hash string, duration int) {
Event: event,
Hash: hash,
Duration: duration,
Start: start,
}
cache.recorder.LogEvent(payload)
}
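
Because the HTTP cache cannot recover a task's start time, its Fetch returns the notime sentinel from cache.go, and its Put ignores start entirely. Any consumer that sorts or displays by start time therefore has to guard against the sentinel; a sketch of such a guard, with describeStart as a hypothetical helper:

package main

import (
	"fmt"
	"time"
)

// notime mirrors the sentinel in cache.go: a deliberately out-of-range
// date that marks a start time as unknown.
var notime = time.Date(0, time.January, 0, 0, 0, 0, 0, time.UTC)

// describeStart shows the check a consumer of Fetch would need before
// trusting the start value.
func describeStart(start time.Time) string {
	if start.Equal(notime) {
		return "start time unknown"
	}
	return start.Format(time.RFC3339)
}

func main() {
	fmt.Println(describeStart(notime))     // start time unknown
	fmt.Println(describeStart(time.Now())) // e.g. 2022-04-25T10:00:00Z
}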