diff --git a/cli/cmd/turbo/main.go b/cli/cmd/turbo/main.go index 69c994c677f8c..8b3d2976bab51 100644 --- a/cli/cmd/turbo/main.go +++ b/cli/cmd/turbo/main.go @@ -1,173 +1,24 @@ package main import ( - "fmt" "os" - "runtime/debug" - "strings" - "time" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/info" - "github.com/vercel/turborepo/cli/internal/login" + "github.com/hashicorp/go-hclog" + "github.com/vercel/turborepo/cli/internal/cmd" "github.com/vercel/turborepo/cli/internal/process" - prune "github.com/vercel/turborepo/cli/internal/prune" - "github.com/vercel/turborepo/cli/internal/run" - "github.com/vercel/turborepo/cli/internal/ui" - uiPkg "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - - "github.com/fatih/color" - hclog "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" ) func main() { - args := os.Args[1:] - heapFile := "" - traceFile := "" - cpuprofileFile := "" - argsEnd := 0 - for _, arg := range args { - switch { - case strings.HasPrefix(arg, "--heap="): - heapFile = arg[len("--heap="):] - case strings.HasPrefix(arg, "--trace="): - traceFile = arg[len("--trace="):] - case strings.HasPrefix(arg, "--cpuprofile="): - cpuprofileFile = arg[len("--cpuprofile="):] - default: - // Strip any arguments that were handled above - args[argsEnd] = arg - argsEnd++ - } - } - args = args[:argsEnd] - - c := cli.NewCLI("turbo", turboVersion) - - util.InitPrintf() - ui := ui.Default() - - c.Args = args - c.HelpWriter = os.Stdout - c.ErrorWriter = os.Stderr - // Parse and validate cmd line flags and env vars - // Note that cf can be nil - cf, err := config.ParseAndValidate(c.Args, ui, turboVersion) - if err != nil { - ui.Error(fmt.Sprintf("%s %s", uiPkg.ERROR_PREFIX, color.RedString(err.Error()))) - os.Exit(1) - } - - var logger hclog.Logger - if cf != nil { - logger = cf.Logger - } else { - logger = hclog.Default() - } - processes := process.NewManager(logger.Named("processes")) - signalCh := watchSignals(func() { processes.Close() }) - c.HiddenCommands = []string{"graph"} - c.Commands = map[string]cli.CommandFactory{ - "run": func() (cli.Command, error) { - return &run.RunCommand{Config: cf, Ui: ui, Processes: processes}, - nil - }, - "prune": func() (cli.Command, error) { - return &prune.PruneCommand{Config: cf, Ui: ui}, nil - }, - "link": func() (cli.Command, error) { - return &login.LinkCommand{Config: cf, Ui: ui}, nil - }, - "unlink": func() (cli.Command, error) { - return &login.UnlinkCommand{Config: cf, Ui: ui}, nil - }, - "login": func() (cli.Command, error) { - return &login.LoginCommand{Config: cf, UI: ui}, nil - }, - "logout": func() (cli.Command, error) { - return &login.LogoutCommand{Config: cf, Ui: ui}, nil - }, - "bin": func() (cli.Command, error) { - return &info.BinCommand{Config: cf, Ui: ui}, nil - }, - } - - // Capture the defer statements below so the "done" message comes last exitCode := 1 doneCh := make(chan struct{}) + processes := process.NewManager(hclog.Default().Named("processes")) + signalCh := watchSignals(func() { processes.Close() }) + func() { defer func() { close(doneCh) }() - // To view a CPU trace, use "go tool trace [file]". Note that the trace - // viewer doesn't work under Windows Subsystem for Linux for some reason. - if traceFile != "" { - if done := createTraceFile(args, traceFile); done == nil { - return - } else { - defer done() - } - } - - // To view a heap trace, use "go tool pprof [file]" and type "top". 
You can - // also drop it into https://speedscope.app and use the "left heavy" or - // "sandwich" view modes. - if heapFile != "" { - if done := createHeapFile(args, heapFile); done == nil { - return - } else { - defer done() - } - } - - // To view a CPU profile, drop the file into https://speedscope.app. - // Note: Running the CPU profiler doesn't work under Windows subsystem for - // Linux. The profiler has to be built for native Windows and run using the - // command prompt instead. - if cpuprofileFile != "" { - if done := createCpuprofileFile(args, cpuprofileFile); done == nil { - return - } else { - defer done() - } - } - - if cpuprofileFile != "" { - // The CPU profiler in Go only runs at 100 Hz, which is far too slow to - // return useful information for esbuild, since it's so fast. Let's keep - // running for 30 seconds straight, which should give us 3,000 samples. - seconds := 30.0 - start := time.Now() - for time.Since(start).Seconds() < seconds { - exitCode, err = c.Run() - if err != nil { - ui.Error(err.Error()) - } - } - } else { - // Don't disable the GC if this is a long-running process - isServe := false - for _, arg := range args { - if arg == "--no-gc" { - isServe = true - break - } - } - - // Disable the GC since we're just going to allocate a bunch of memory - // and then exit anyway. This speedup is not insignificant. Make sure to - // only do this here once we know that we're not going to be a long-lived - // process though. - if !isServe { - debug.SetGCPercent(-1) - } - - exitCode, err = c.Run() - if err != nil { - ui.Error(err.Error()) - } - } + exitCode = cmd.Execute(turboVersion, processes) }() + // Wait for either our command to finish, in which case we need to clean up, // or to receive a signal, in which case the signal handler above does the cleanup select { @@ -175,5 +26,6 @@ func main() { processes.Close() case <-signalCh: } + os.Exit(exitCode) } diff --git a/cli/go.mod b/cli/go.mod index 151fcb9a5946e..7f1a6388e20ae 100644 --- a/cli/go.mod +++ b/cli/go.mod @@ -1,40 +1,49 @@ module github.com/vercel/turborepo/cli -go 1.16 +go 1.17 require ( - github.com/AlecAivazis/survey/v2 v2.2.12 + github.com/AlecAivazis/survey/v2 v2.3.2 github.com/Masterminds/semver v1.5.0 - github.com/adrg/xdg v0.3.3 + github.com/adrg/xdg v0.4.0 github.com/bmatcuk/doublestar/v4 v4.0.2 - github.com/briandowns/spinner v1.16.0 - github.com/deckarep/golang-set v1.7.1 + github.com/briandowns/spinner v1.18.1 + github.com/deckarep/golang-set v1.8.0 github.com/fatih/color v1.13.0 github.com/gobwas/glob v0.2.3 - github.com/google/chrometracing v0.0.0-20210413150014-55fded0163e7 + github.com/google/chrometracing v0.0.0-20210820115312-9b2483a9dc7d github.com/google/uuid v1.3.0 - github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-gatedio v0.5.0 github.com/hashicorp/go-hclog v1.1.0 - github.com/hashicorp/go-retryablehttp v0.6.8 + github.com/hashicorp/go-retryablehttp v0.7.0 github.com/karrick/godirwalk v1.16.1 github.com/kelseyhightower/envconfig v1.4.0 - github.com/kr/text v0.2.0 // indirect github.com/mattn/go-isatty v0.0.14 - github.com/mitchellh/cli v1.1.2 github.com/mitchellh/go-homedir v1.1.0 github.com/mitchellh/mapstructure v1.4.3 - github.com/mitchellh/reflectwalk v1.0.1 // indirect - github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect github.com/pkg/errors v0.9.1 github.com/pyr-sh/dag v1.0.0 - github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f + github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 
github.com/sourcegraph/go-diff v0.6.1 github.com/spf13/cobra v1.3.0 github.com/stretchr/testify v1.7.0 github.com/yosuke-furukawa/json5 v0.1.1 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect - gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b ) + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/hashicorp/errwrap v1.0.0 // indirect + github.com/hashicorp/go-cleanhttp v0.5.2 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 // indirect + github.com/mattn/go-colorable v0.1.12 // indirect + github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect + golang.org/x/term v0.0.0-20210503060354-a79de5458b56 // indirect + golang.org/x/text v0.3.7 // indirect +) diff --git a/cli/go.sum b/cli/go.sum index af92ceb7a0a97..1f2ca0b69f010 100644 --- a/cli/go.sum +++ b/cli/go.sum @@ -46,22 +46,18 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/AlecAivazis/survey/v2 v2.2.12 h1:5a07y93zA6SZ09gOa9wLVLznF5zTJMQ+pJ3cZK4IuO8= -github.com/AlecAivazis/survey/v2 v2.2.12/go.mod h1:6d4saEvBsfSHXeN1a5OA5m2+HJ2LuVokllnC77pAIKI= +github.com/AlecAivazis/survey/v2 v2.3.2 h1:TqTB+aDDCLYhf9/bD2TwSO8u8jDSmMUd2SUVO4gCnU8= +github.com/AlecAivazis/survey/v2 v2.3.2/go.mod h1:TH2kPCDU3Kqq7pLbnCWwZXDBjnhZtmsCle5EiYDJ2fg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/Masterminds/goutils v1.1.0 h1:zukEsf/1JZwCMgHiK3GZftabmxiCw4apj3a28RPBiVg= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8 h1:xzYJEypr/85nBpB11F9br+3HUrpgb+fcm5iADzXXYEw= github.com/Netflix/go-expect v0.0.0-20180615182759-c93bf25de8e8/go.mod h1:oX5x61PbNXchhh0oikYAH+4Pcfw5LKv21+Jnpr6r6Pc= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/adrg/xdg v0.3.3 h1:s/tV7MdqQnzB1nKY8aqHvAMD+uCiuEDzVB5HLRY849U= -github.com/adrg/xdg v0.3.3/go.mod h1:61xAR2VZcggl2St4O9ohF5qCKe08+JDmE4VNzPFQvOQ= +github.com/adrg/xdg v0.4.0 h1:RzRqFcjH4nE5C6oTAxhBtoE2IRyjBSa62SCbyPidvls= +github.com/adrg/xdg v0.4.0/go.mod h1:N6ag73EX4wyxeaoeHctc1mas01KZgsj5tYiAIwqJE/E= 
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -71,17 +67,15 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0 h1:ByYyxL9InA1OWqxJqqp2A5pYHUrCiAL6K3J+LKSsQkY= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA= github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc= -github.com/briandowns/spinner v1.16.0 h1:DFmp6hEaIx2QXXuqSJmtfSBSAjRmpGiKG6ip2Wm/yOs= -github.com/briandowns/spinner v1.16.0/go.mod h1:QOuQk7x+EaDASo80FEXwlwiA+j/PPIcX3FScO+3/ZPQ= +github.com/briandowns/spinner v1.18.1 h1:yhQmQtM1zsqFsouh09Bk/jCjd50pC3EOGsh28gLVvwY= +github.com/briandowns/spinner v1.18.1/go.mod h1:mQak9GHqbspjC/5iUx3qMlIho8xBS/ppAL/hX5SmPJU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= @@ -106,12 +100,11 @@ github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWH github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ= -github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ= +github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4= +github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS383rP6+o6qqo= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod 
h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= @@ -176,8 +169,8 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/chrometracing v0.0.0-20210413150014-55fded0163e7 h1:mc1AFRocuO7EtVJgn4YIg97gBJ9VjiT4+UbCAM2IS/k= -github.com/google/chrometracing v0.0.0-20210413150014-55fded0163e7/go.mod h1:k2+go54tKjJPxWHxllhAI7WtOaxnnIaB0LjnGEsbyj0= +github.com/google/chrometracing v0.0.0-20210820115312-9b2483a9dc7d h1:jKEXtvJ5/rLLgROGAHfVY+utB+LvFftKCzKu3EpTTRI= +github.com/google/chrometracing v0.0.0-20210820115312-9b2483a9dc7d/go.mod h1:k2+go54tKjJPxWHxllhAI7WtOaxnnIaB0LjnGEsbyj0= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -221,9 +214,8 @@ github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= -github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -243,8 +235,8 @@ github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+ github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= -github.com/hashicorp/go-retryablehttp v0.6.8 h1:92lWxgpa+fF3FozM4B3UZtHZMJX8T5XT+TFdCxsPyWs= -github.com/hashicorp/go-retryablehttp v0.6.8/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= +github.com/hashicorp/go-retryablehttp v0.7.0 h1:eu1EI/mbirUgP5C8hVsTNaGZreBDlYiwC1FZWkvQPQ4= +github.com/hashicorp/go-retryablehttp v0.7.0/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -263,13 +255,9 @@ github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= 
github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174 h1:WlZsjVhE8Af9IcZDGgJGQpNflI3+MJSBhsgT5PCtzBQ= github.com/hinshun/vt10x v0.0.0-20180616224451-1954e6464174/go.mod h1:DqJ97dSdRW1W22yXSB90986pcOyQ7r45iio1KN2ez1A= -github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -291,13 +279,13 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.4 h1:5Myjjh3JY/NaAi4IsUbHADytDyl1VE1Y9PXDlL+P/VQ= github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -321,10 +309,6 @@ github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3N github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= -github.com/mitchellh/cli v1.1.2 h1:PvH+lL2B7IQ101xQL63Of8yFS2y+aDlsFcsqNc+u/Kw= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= -github.com/mitchellh/copystructure v1.0.0 h1:Laisrj+bAB6b/yJwB5Bt3ITZhGJdqmxquMKeZ+mmkFQ= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= 
@@ -332,17 +316,12 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1 h1:FVzMWA5RllMAKIdUSC8mdWo3XtwoecrH79BY70sEEpE= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= @@ -354,7 +333,6 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.3 h1:NP0eAhjcjImqslEwo/1hq7gpajME0fTLTezBKDqfXqo= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= @@ -374,8 +352,8 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f h1:8P2MkG70G76gnZBOPGwmMIgwBb/rESQuwsJ7K8ds4NE= -github.com/sabhiram/go-gitignore v0.0.0-20201211210132-54b8a0bf510f/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= +github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= +github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= github.com/sagikazarmark/crypt v0.3.0/go.mod 
h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= @@ -437,8 +415,6 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -558,7 +534,6 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190530182044-ad28b68e88f1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -610,12 +585,13 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf h1:MZ2shdL+ZM/XzY3ZGOnh4Nlpnxz5GSOhOmtHo3iPU6M= -golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56 h1:b8jxX3zqjpqb2LklXPzKSGJhzyxCOZSz8ncv8Nv+y7w= +golang.org/x/term v0.0.0-20210503060354-a79de5458b56/go.mod h1:tfny5GFUkzUvx4ps4ajbZsCe5lw1metzhBm9T3x7oIY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -833,9 +809,8 @@ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -844,8 +819,6 @@ gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= diff --git a/cli/internal/api/types.go b/cli/internal/api/types.go index 94a6c1ea62d75..9279701817a18 100644 --- a/cli/internal/api/types.go +++ b/cli/internal/api/types.go @@ -1,6 +1,8 @@ package api -import "github.com/vercel/turborepo/cli/internal/fs" +import ( + "github.com/vercel/turborepo/cli/internal/fs" +) // LanguageBackend is an abstraction across programming languages and their related package managers type LanguageBackend struct { diff --git a/cli/internal/backends/backends.go b/cli/internal/backends/backends.go index ff76ffb3a5e48..d93907280f0be 100644 --- a/cli/internal/backends/backends.go +++ b/cli/internal/backends/backends.go @@ -2,6 +2,7 @@ package backends import ( "errors" + "github.com/vercel/turborepo/cli/internal/api" "github.com/vercel/turborepo/cli/internal/backends/nodejs" "github.com/vercel/turborepo/cli/internal/fs" diff --git a/cli/internal/backends/nodejs/nodejs.go b/cli/internal/backends/nodejs/nodejs.go index 1c8cff76f143e..af37a782dcc9f 100644 --- a/cli/internal/backends/nodejs/nodejs.go +++ b/cli/internal/backends/nodejs/nodejs.go @@ -8,7 +8,6 @@ import ( "github.com/vercel/turborepo/cli/internal/api" "github.com/vercel/turborepo/cli/internal/fs" "github.com/vercel/turborepo/cli/internal/util" - "gopkg.in/yaml.v3" ) @@ -115,7 +114,6 @@ var NodejsBerryBackend = api.LanguageBackend{ return true, nil } } else { - specfileExists := fs.FileExists(filepath.Join(cwd, backend.Specfile)) lockfileExists := 
fs.FileExists(filepath.Join(cwd, backend.Lockfile)) @@ -125,13 +123,6 @@ var NodejsBerryBackend = api.LanguageBackend{ } if specfileExists && lockfileExists && isBerry { - isNMLinker, err := util.IsNMLinker(cwd) - if err != nil { - return false, fmt.Errorf("could not check if yarn is using nm-linker: %w", err) - } else if !isNMLinker { - return false, fmt.Errorf("only yarn nm-linker is supported") - } - return true, nil } } diff --git a/cli/internal/cache/cache.go b/cli/internal/cache/cache.go index e1ecb93685994..1378624f70917 100644 --- a/cli/internal/cache/cache.go +++ b/cli/internal/cache/cache.go @@ -46,7 +46,7 @@ func newSyncCache(config *config.Config, remoteOnly bool, recorder analytics.Rec if config.Cache.Dir != "" && !remoteOnly { mplex.caches = append(mplex.caches, newFsCache(config, recorder)) } - if config.IsLoggedIn() { + if config.IsAuthenticated() { fmt.Println(ui.Dim("• Remote computation caching enabled (experimental)")) mplex.caches = append(mplex.caches, newHTTPCache(config, recorder)) } diff --git a/cli/internal/cache/cache_http.go b/cli/internal/cache/cache_http.go index e8f68deaaa5cc..cc513caa5a68c 100644 --- a/cli/internal/cache/cache_http.go +++ b/cli/internal/cache/cache_http.go @@ -6,7 +6,7 @@ import ( "fmt" "io" "io/ioutil" - log "log" + "log" "net/http" "os" "path" diff --git a/cli/internal/cmd/auth/link.go b/cli/internal/cmd/auth/link.go new file mode 100644 index 0000000000000..41d344f9ce8d5 --- /dev/null +++ b/cli/internal/cmd/auth/link.go @@ -0,0 +1,149 @@ +package auth + +import ( + "os/exec" + "path/filepath" + "strings" + + "github.com/AlecAivazis/survey/v2" + "github.com/mitchellh/go-homedir" + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/client" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/config" + "github.com/vercel/turborepo/cli/internal/fs" + "github.com/vercel/turborepo/cli/internal/ui" + "github.com/vercel/turborepo/cli/internal/util" +) + +func LinkCmd(ch *cmdutil.Helper) *cobra.Command { + var opts struct { + noGitignore bool + } + + cmd := &cobra.Command{ + Use: "link", + Short: "Link your local directory to a Vercel organization and enable remote caching", + RunE: func(cmd *cobra.Command, args []string) error { + shouldSetup := true + dir, homeDirErr := homedir.Dir() + if homeDirErr != nil { + return ch.LogError("could not find home directory.\n%w", homeDirErr) + } + + ch.Logger.Printf(">>> Remote Caching (beta)") + ch.Logger.Printf("") + ch.Logger.Printf(" Remote Caching shares your cached Turborepo task outputs and logs") + ch.Logger.Printf(" across all your team’s Vercel projects. 
It also can share outputs") + ch.Logger.Printf(" with other services that enable Remote Caching, like CI/CD systems.") + ch.Logger.Printf(" This results in faster build times and deployments for your team.") + ch.Logger.Printf(" For more info, see ${UNDERLINE}https://turborepo.org/docs/features/remote-caching${RESET}") + ch.Logger.Printf("") + currentDir, fpErr := filepath.Abs(".") + if fpErr != nil { + return ch.LogError("could figure out file path.\n%w", fpErr) + } + + survey.AskOne( + &survey.Confirm{ + Default: true, + Message: util.Sprintf("Would you like to enable Remote Caching for ${CYAN}${BOLD}\"%s\"${RESET}?", strings.Replace(currentDir, dir, "~", 1)), + }, + &shouldSetup, survey.WithValidator(survey.Required), + survey.WithIcons(func(icons *survey.IconSet) { + // for more information on formatting the icons, see here: https://github.com/mgutz/ansi#style-format + icons.Question.Format = "gray+hb" + })) + + if !shouldSetup { + ch.Logger.Printf("> Canceled.") + return nil + } + + if ch.Config.Token == "" { + return ch.LogError("user not found. Please login to Turborepo first by running ${BOLD}`npx turbo login`${RESET}.") + } + + teamsResponse, err := ch.Config.ApiClient.GetTeams() + if err != nil { + return ch.LogError("could not get team information.\n%w", err) + } + userResponse, err := ch.Config.ApiClient.GetUser() + if err != nil { + return ch.LogError("could not get user information.\n%w", err) + } + + var chosenTeam client.Team + + teamOptions := make([]string, len(teamsResponse.Teams)) + + // Gather team options + for i, team := range teamsResponse.Teams { + teamOptions[i] = team.Name + } + + var chosenTeamName string + nameWithFallback := userResponse.User.Name + if nameWithFallback == "" { + nameWithFallback = userResponse.User.Username + } + survey.AskOne( + &survey.Select{ + Message: "Which Vercel scope (and Remote Cache) do you want associate with this Turborepo? ", + Options: append([]string{nameWithFallback}, teamOptions...), + }, + &chosenTeamName, + survey.WithValidator(survey.Required), + survey.WithIcons(func(icons *survey.IconSet) { + // for more information on formatting the icons, see here: https://github.com/mgutz/ansi#style-format + icons.Question.Format = "gray+hb" + })) + + if chosenTeamName == "" { + ch.Logger.Printf("Canceled. 
Turborepo not set up.") + return nil + } else if (chosenTeamName == userResponse.User.Name) || (chosenTeamName == userResponse.User.Username) { + chosenTeam = client.Team{ + ID: userResponse.User.ID, + Name: userResponse.User.Name, + Slug: userResponse.User.Username, + } + } else { + for _, team := range teamsResponse.Teams { + if team.Name == chosenTeamName { + chosenTeam = team + break + } + } + } + fs.EnsureDir(filepath.Join(".turbo", "config.json")) + fsErr := config.WriteRepoConfigFile(&config.TurborepoConfig{ + TeamId: chosenTeam.ID, + ApiUrl: ch.Config.ApiUrl, + }) + if fsErr != nil { + return ch.LogError("could not link current directory to team/user.\n%w", fsErr) + } + + if !opts.noGitignore { + fs.EnsureDir(".gitignore") + _, gitIgnoreErr := exec.Command("sh", "-c", "grep -qxF '.turbo' .gitignore || echo '.turbo' >> .gitignore").CombinedOutput() + if err != nil { + return ch.LogError("could not find or update .gitignore.\n%w", gitIgnoreErr) + } + } + + ch.Logger.Printf("") + ch.Logger.Printf("%s${RESET} Turborepo CLI authorized for ${BOLD}%s${RESET}", ui.Rainbow(">>> Success!"), chosenTeam.Name) + ch.Logger.Printf("") + ch.Logger.Printf("${GREY}To disable Remote Caching, run `npx turbo unlink`${RESET}") + ch.Logger.Printf("") + + return nil + }, + } + + cmd.Flags().BoolVarP(&opts.noGitignore, "no-gitignore", "n", false, "do not create or modify .gitignore") + + return cmd +} diff --git a/cli/internal/cmd/auth/login.go b/cli/internal/cmd/auth/login.go new file mode 100644 index 0000000000000..6cd0f9de19701 --- /dev/null +++ b/cli/internal/cmd/auth/login.go @@ -0,0 +1,300 @@ +package auth + +import ( + "context" + "fmt" + "net" + "net/http" + "net/url" + "os" + "os/signal" + + "github.com/pkg/errors" + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/config" + "github.com/vercel/turborepo/cli/internal/ui" + "github.com/vercel/turborepo/cli/internal/util/browser" +) + +const ( + defaultHostname = "127.0.0.1" + defaultPort = 9789 + defaultSSOProvider = "SAML/OIDC Single Sign-On" +) + +type oneShotServer struct { + Port uint16 + requestDone chan struct{} + serverDone chan struct{} + serverErr error + ctx context.Context + srv *http.Server +} + +type deps struct { + ch *cmdutil.Helper + openUrl func(string) error +} + +func LoginCmd(ch *cmdutil.Helper) *cobra.Command { + var opts struct { + ssoTeam string + } + + cmd := &cobra.Command{ + Use: "login", + Short: "Login to your Vercel account", + RunE: func(cmd *cobra.Command, args []string) error { + if opts.ssoTeam != "" { + return loginSSO(deps{ + ch: ch, + openUrl: browser.OpenBrowser, + }, opts.ssoTeam) + } else { + return login(deps{ + ch: ch, + openUrl: browser.OpenBrowser, + }) + } + }, + } + + cmd.Flags().StringVar(&opts.ssoTeam, "sso-team", "", "attempt to authenticate to the specified team using SSO") + + return cmd +} + +func login(deps deps) error { + ch := deps.ch + + ch.Config.Logger.Debug(fmt.Sprintf("turbo v%v", ch.Config.Version)) + ch.Config.Logger.Debug(fmt.Sprintf("api url: %v", ch.Config.ApiUrl)) + ch.Config.Logger.Debug(fmt.Sprintf("login url: %v", ch.Config.LoginUrl)) + redirectURL := fmt.Sprintf("http://%v:%v", defaultHostname, defaultPort) + loginURL := fmt.Sprintf("%v/turborepo/token?redirect_uri=%v", ch.Config.LoginUrl, redirectURL) + ch.Logger.Printf(">>> Opening browser to %v", ch.Config.LoginUrl) + + rootctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + + var query url.Values + oss, err := 
newOneShotServer(rootctx, func(w http.ResponseWriter, r *http.Request) { + query = r.URL.Query() + http.Redirect(w, r, ch.Config.LoginUrl+"/turborepo/success", http.StatusFound) + }, defaultPort) + if err != nil { + return ch.LogError("failed to start local server: %w", err) + } + + s := ui.NewSpinner(os.Stdout) + err = deps.openUrl(loginURL) + if err != nil { + return ch.LogError("failed to open %v: %w", loginURL, err) + } + s.Start("Waiting for your authorization...") + err = oss.Wait() + if err != nil { + return ch.LogError("failed to shut down local server: %w", err) + } + // Stop the spinner before we return to ensure terminal is left in a good state + s.Stop("") + + rawToken := query.Get("token") + config.WriteUserConfigFile(&config.TurborepoConfig{Token: rawToken}) + ch.Config.ApiClient.SetToken(rawToken) + + userResponse, err := ch.Config.ApiClient.GetUser() + if err != nil { + return ch.LogError("could not get user information: %w", err) + } + + ch.Logger.Printf("") + ch.Logger.Printf("%s Turborepo CLI authorized for %s${RESET}", ui.Rainbow(">>> Success!"), userResponse.User.Email) + ch.Logger.Printf("") + ch.Logger.Printf("${CYAN}To connect to your Remote Cache. Run the following in the${RESET}") + ch.Logger.Printf("${CYAN}root of any turborepo:${RESET}") + ch.Logger.Printf("") + ch.Logger.Printf(" ${BOLD}npx turbo link${RESET}") + ch.Logger.Printf("") + + return nil +} + +func loginSSO(deps deps, ssoTeam string) error { + ch := deps.ch + + redirectURL := fmt.Sprintf("http://%v:%v", defaultHostname, defaultPort) + query := make(url.Values) + query.Add("teamId", ssoTeam) + query.Add("mode", "login") + query.Add("next", redirectURL) + loginURL := fmt.Sprintf("%v/api/auth/sso?%v", ch.Config.LoginUrl, query.Encode()) + + rootctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) + defer cancel() + + var verificationToken string + oss, err := newOneShotServer(rootctx, func(w http.ResponseWriter, r *http.Request) { + token, location := getTokenAndRedirect(r.URL.Query()) + verificationToken = token + http.Redirect(w, r, location, http.StatusFound) + }, defaultPort) + if err != nil { + return ch.LogError("failed to start local server: %v", err) + } + + s := ui.NewSpinner(os.Stdout) + err = deps.openUrl(loginURL) + if err != nil { + return ch.LogError("failed to open %v: %w", loginURL, err) + } + + s.Start("Waiting for your authorization...") + err = oss.Wait() + if err != nil { + return ch.LogError("failed to shut down local server: %w", err) + } + + // Stop the spinner before we return to ensure terminal is left in a good state + s.Stop("") + // open https://vercel.com/api/auth/sso?teamId=&mode=login + if verificationToken == "" { + return ch.LogError("no token auth token found") + } + + // We now have a verification token. We need to pass it to the verification endpoint + // to get an actual token. 
+ tokenName, err := makeTokenName() + if err != nil { + return ch.LogError("failed to make sso token name: %w", err) + } + verifiedUser, err := ch.Config.ApiClient.VerifySSOToken(verificationToken, tokenName) + if err != nil { + return ch.LogError("failed to verify SSO token: %w", err) + } + + ch.Config.ApiClient.SetToken(verifiedUser.Token) + userResponse, err := ch.Config.ApiClient.GetUser() + if err != nil { + return ch.LogError("could not get user information: %w", err) + } + + err = config.WriteUserConfigFile(&config.TurborepoConfig{Token: verifiedUser.Token}) + if err != nil { + return ch.LogError("failed to save auth token: %w", err) + } + + ch.Logger.Printf("") + ch.Logger.Printf("%s Turborepo CLI authorized for %s${RESET}", ui.Rainbow(">>> Success!"), userResponse.User.Email) + ch.Logger.Printf("") + + if verifiedUser.TeamID != "" { + err = config.WriteRepoConfigFile(&config.TurborepoConfig{TeamId: verifiedUser.TeamID, ApiUrl: ch.Config.ApiUrl}) + if err != nil { + return ch.LogError("failed to save teamId: %w", err) + } + } else { + ch.Logger.Printf("${CYAN}To connect to your Remote Cache. Run the following in the${RESET}") + ch.Logger.Printf("${CYAN}root of any turborepo:${RESET}") + ch.Logger.Printf("") + ch.Logger.Printf(" ${BOLD}npx turbo link${RESET}") + ch.Logger.Printf("") + } + + return nil +} + +func getTokenAndRedirect(params url.Values) (string, string) { + locationStub := "https://vercel.com/notifications/cli-login-" + if loginError := params.Get("loginError"); loginError != "" { + outParams := make(url.Values) + outParams.Add("loginError", loginError) + return "", locationStub + "failed?" + outParams.Encode() + } + if ssoEmail := params.Get("ssoEmail"); ssoEmail != "" { + outParams := make(url.Values) + outParams.Add("ssoEmail", ssoEmail) + if teamName := params.Get("teamName"); teamName != "" { + outParams.Add("teamName", teamName) + } + if ssoType := params.Get("ssoType"); ssoType != "" { + outParams.Add("ssoType", ssoType) + } + return "", locationStub + "incomplete?" + outParams.Encode() + } + token := params.Get("token") + location := locationStub + "success" + if email := params.Get("email"); email != "" { + outParams := make(url.Values) + outParams.Add("email", email) + location += "?" + outParams.Encode() + } + return token, location +} + +func newOneShotServer(ctx context.Context, handler http.HandlerFunc, port uint16) (*oneShotServer, error) { + requestDone := make(chan struct{}) + serverDone := make(chan struct{}) + mux := http.NewServeMux() + srv := &http.Server{Handler: mux} + oss := &oneShotServer{ + Port: port, + requestDone: requestDone, + serverDone: serverDone, + ctx: ctx, + srv: srv, + } + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + handler(w, r) + close(oss.requestDone) + }) + err := oss.start(handler) + if err != nil { + return nil, err + } + return oss, nil +} + +func (oss *oneShotServer) start(handler http.HandlerFunc) error { + // Start listening immediately to handle race with user interaction + // This is mostly for testing, but would otherwise still technically be + // a race condition. + addr := defaultHostname + ":" + fmt.Sprint(oss.Port) + l, err := net.Listen("tcp", addr) + if err != nil { + return err + } + go func() { + if err := oss.srv.Serve(l); err != nil && !errors.Is(err, http.ErrServerClosed) { + oss.serverErr = fmt.Errorf("could not activate device. 
Please try again: %w", err) + } + close(oss.serverDone) + }() + return nil +} + +func (oss *oneShotServer) Wait() error { + select { + case <-oss.requestDone: + case <-oss.ctx.Done(): + } + return oss.closeServer() +} + +func (oss *oneShotServer) closeServer() error { + err := oss.srv.Shutdown(oss.ctx) + if err != nil { + return err + } + <-oss.serverDone + return oss.serverErr +} + +func makeTokenName() (string, error) { + host, err := os.Hostname() + if err != nil { + return "", err + } + return fmt.Sprintf("Turbo CLI on %v via %v", host, defaultSSOProvider), nil +} diff --git a/cli/internal/cmd/auth/login_test.go b/cli/internal/cmd/auth/login_test.go new file mode 100644 index 0000000000000..4f6b82936eaf8 --- /dev/null +++ b/cli/internal/cmd/auth/login_test.go @@ -0,0 +1,159 @@ +package auth + +import ( +// "fmt" +// "net/http" +// "net/url" +// "os" +// "testing" + +// "github.com/hashicorp/go-hclog" +// "github.com/vercel/turborepo/cli/internal/client" +// "github.com/vercel/turborepo/cli/internal/config" +// "github.com/vercel/turborepo/cli/internal/ui" +) + +// type dummyClient struct { +// setToken string +// createdSSOTokenName string +// } + +// func (d *dummyClient) SetToken(t string) { +// d.setToken = t +// } + +// func (d *dummyClient) GetUser() (*client.UserResponse, error) { +// return &client.UserResponse{}, nil +// } + +// func (d *dummyClient) VerifySSOToken(token string, tokenName string) (*client.VerifiedSSOUser, error) { +// d.createdSSOTokenName = tokenName +// return &client.VerifiedSSOUser{ +// Token: "actual-sso-token", +// TeamID: "sso-team-id", +// }, nil +// } + +// var logger = hclog.Default() +// var cf = &config.Config{ +// Logger: logger, +// TurboVersion: "test", +// ApiUrl: "api-url", +// LoginUrl: "login-url", +// } + +// type testResult struct { +// clientErr error +// userConfigWritten *config.TurborepoConfig +// repoConfigWritten *config.TurborepoConfig +// clientTokenWritten string +// openedURL string +// stepCh chan struct{} +// client dummyClient +// } + +// func (tr *testResult) Deps() loginDeps { +// urlOpener := func(url string) error { +// tr.openedURL = url +// tr.stepCh <- struct{}{} +// return nil +// } +// return loginDeps{ +// ui: ui.Default(), +// openURL: urlOpener, +// client: &tr.client, +// writeUserConfig: func(cf *config.TurborepoConfig) error { +// tr.userConfigWritten = cf +// return nil +// }, +// writeRepoConfig: func(cf *config.TurborepoConfig) error { +// tr.repoConfigWritten = cf +// return nil +// }, +// } +// } + +// func newTest(redirectedURL string) *testResult { +// stepCh := make(chan struct{}, 1) +// tr := &testResult{ +// stepCh: stepCh, +// } +// // When it's time, do the redirect +// go func() { +// <-tr.stepCh +// client := &http.Client{ +// CheckRedirect: func(req *http.Request, via []*http.Request) error { +// return http.ErrUseLastResponse +// }, +// } +// resp, err := client.Get(redirectedURL) +// if err != nil { +// tr.clientErr = err +// } else if resp != nil && resp.StatusCode != http.StatusFound { +// tr.clientErr = fmt.Errorf("invalid status %v", resp.StatusCode) +// } +// tr.stepCh <- struct{}{} +// }() +// return tr +// } + +// func Test_run(t *testing.T) { +// test := newTest("http://127.0.0.1:9789/?token=my-token") +// err := run(cf, test.Deps()) +// if err != nil { +// t.Errorf("expected to succeed, got error %v", err) +// } +// <-test.stepCh +// if test.clientErr != nil { +// t.Errorf("test client had error %v", test.clientErr) +// } + +// expectedURL := 
"login-url/turborepo/token?redirect_uri=http://127.0.0.1:9789" +// if test.openedURL != expectedURL { +// t.Errorf("openedURL got %v, want %v", test.openedURL, expectedURL) +// } + +// if test.userConfigWritten.Token != "my-token" { +// t.Errorf("config token got %v, want my-token", test.userConfigWritten.Token) +// } +// if test.client.setToken != "my-token" { +// t.Errorf("user client token got %v, want my-token", test.client.setToken) +// } +// } + +// func Test_sso(t *testing.T) { +// redirectParams := make(url.Values) +// redirectParams.Add("token", "verification-token") +// redirectParams.Add("email", "test@example.com") +// test := newTest("http://127.0.0.1:9789/?" + redirectParams.Encode()) +// err := loginSSO(cf, "my-team", test.Deps()) +// if err != nil { +// t.Errorf("expected to succeed, got error %v", err) +// } +// <-test.stepCh +// if test.clientErr != nil { +// t.Errorf("test client had error %v", test.clientErr) +// } +// host, err := os.Hostname() +// if err != nil { +// t.Errorf("failed to get hostname %v", err) +// } +// expectedTokenName := fmt.Sprintf("Turbo CLI on %v via SAML/OIDC Single Sign-On", host) +// if test.client.createdSSOTokenName != expectedTokenName { +// t.Errorf("created sso token got %v want %v", test.client.createdSSOTokenName, expectedTokenName) +// } +// expectedToken := "actual-sso-token" +// if test.client.setToken != expectedToken { +// t.Errorf("user client token got %v, want %v", test.client.setToken, expectedToken) +// } +// if test.userConfigWritten.Token != expectedToken { +// t.Errorf("user config token got %v want %v", test.userConfigWritten.Token, expectedToken) +// } +// expectedTeamID := "sso-team-id" +// if test.repoConfigWritten.TeamId != expectedTeamID { +// t.Errorf("repo config team id got %v want %v", test.repoConfigWritten.TeamId, expectedTeamID) +// } +// if test.repoConfigWritten.Token != "" { +// t.Errorf("repo config file token, got %v want empty string", test.repoConfigWritten.Token) +// } +// } \ No newline at end of file diff --git a/cli/internal/cmd/auth/logout.go b/cli/internal/cmd/auth/logout.go new file mode 100644 index 0000000000000..dcebfbfdbdf37 --- /dev/null +++ b/cli/internal/cmd/auth/logout.go @@ -0,0 +1,24 @@ +package auth + +import ( + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/config" +) + +func LogoutCmd(ch *cmdutil.Helper) *cobra.Command { + cmd := &cobra.Command{ + Use: "logout", + Short: "Logout of your Vercel account", + RunE: func(cmd *cobra.Command, args []string) error { + if err := config.DeleteUserConfigFile(); err != nil { + return ch.LogError("could not logout. Something went wrong: %w", err) + } + + ch.Logger.Printf("${GREY}>>> Logged out${RESET}") + return nil + }, + } + + return cmd +} diff --git a/cli/internal/cmd/auth/unlink.go b/cli/internal/cmd/auth/unlink.go new file mode 100644 index 0000000000000..0627193d80a46 --- /dev/null +++ b/cli/internal/cmd/auth/unlink.go @@ -0,0 +1,24 @@ +package auth + +import ( + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/config" +) + +func UnlinkCmd(ch *cmdutil.Helper) *cobra.Command { + cmd := &cobra.Command{ + Use: "unlink", + Short: "Unlink the current directory from your Vercel organization and disable Remote Caching (beta)", + RunE: func(cmd *cobra.Command, args []string) error { + if err := config.WriteRepoConfigFile(&config.TurborepoConfig{}); err != nil { + return ch.LogError("could not unlink. 
Something went wrong: %w", err) + } + + ch.Logger.Printf("${GREY}> Disabled Remote Caching${RESET}") + return nil + }, + } + + return cmd +} diff --git a/cli/internal/cmd/info/bin.go b/cli/internal/cmd/info/bin.go new file mode 100644 index 0000000000000..e20b684e11742 --- /dev/null +++ b/cli/internal/cmd/info/bin.go @@ -0,0 +1,26 @@ +package info + +import ( + "os" + + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/cmdutil" +) + +func BinCmd(ch *cmdutil.Helper) *cobra.Command { + cmd := &cobra.Command{ + Use: "bin", + Short: "Get the path to the Turbo binary", + RunE: func(cmd *cobra.Command, args []string) error { + path, err := os.Executable() + if err != nil { + return ch.LogError("could not get path to turbo binary: %w", err) + } + + ch.Logger.Printf(path) + return nil + }, + } + + return cmd +} diff --git a/cli/internal/cmd/prune/prune.go b/cli/internal/cmd/prune/prune.go new file mode 100644 index 0000000000000..b951676e04b81 --- /dev/null +++ b/cli/internal/cmd/prune/prune.go @@ -0,0 +1,225 @@ +package prune + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "strings" + + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/context" + "github.com/vercel/turborepo/cli/internal/fs" + "github.com/vercel/turborepo/cli/internal/ui" + "github.com/vercel/turborepo/cli/internal/util" + "gopkg.in/yaml.v3" +) + +func PruneCmd(ch *cmdutil.Helper) *cobra.Command { + var opts struct { + scope string + docker bool + cwd string + } + + cmd := &cobra.Command{ + Use: "prune", + Short: "Prepare a subset of your monorepo", + RunE: func(cmd *cobra.Command, args []string) error { + logger := log.New(os.Stdout, "", 0) + + ctx, err := context.New(context.WithGraph(opts.cwd, ch.Config)) + if err != nil { + return ch.LogError("could not construct graph: %w", err) + } + + ch.Config.Logger.Trace("scope", "value", opts.scope) + target, scopeIsValid := ctx.PackageInfos[opts.scope] + if !scopeIsValid { + return ch.LogError("invalid scope: package not found") + } + + ch.Config.Logger.Trace("target", "value", target.Name) + ch.Config.Logger.Trace("directory", "value", target.Dir) + ch.Config.Logger.Trace("external deps", "value", target.UnresolvedExternalDeps) + ch.Config.Logger.Trace("internal deps", "value", target.InternalDeps) + ch.Config.Logger.Trace("docker", "value", opts.docker) + ch.Config.Logger.Trace("out dir", "value", filepath.Join(opts.cwd, "out")) + + if !util.IsYarn(ctx.Backend.Name) { + return ch.LogError("this command is not yet implemented for %s", ctx.Backend.Name) + } else if ctx.Backend.Name == "nodejs-berry" { + isNMLinker, err := util.IsNMLinker(opts.cwd) + if err != nil { + return ch.LogError("could not determine if yarn is using `nodeLinker: node-modules`: %w", err) + } else if !isNMLinker { + return ch.LogError("only yarn v2/v3 with `nodeLinker: node-modules` is supported at this time") + } + } + + logger.Printf("Generating pruned monorepo for %v in %v", ui.Bold(opts.scope), ui.Bold(filepath.Join(opts.cwd, "out"))) + + err = fs.EnsureDir(filepath.Join(opts.cwd, "out", "package.json")) + if err != nil { + return ch.LogError("could not create directory: %w", err) + } + workspaces := []string{} + lockfile := ctx.RootPackageInfo.SubLockfile + targets := []interface{}{opts.scope} + internalDeps, err := ctx.TopologicalGraph.Ancestors(opts.scope) + if err != nil { + return ch.LogError("could find traverse the dependency graph to find topological dependencies: %w", 
err) + } + targets = append(targets, internalDeps.List()...) + + for _, internalDep := range targets { + if internalDep == ctx.RootNode { + continue + } + workspaces = append(workspaces, ctx.PackageInfos[internalDep].Dir) + if opts.docker { + targetDir := filepath.Join(opts.cwd, "out", "full", ctx.PackageInfos[internalDep].Dir) + jsonDir := filepath.Join(opts.cwd, "out", "json", ctx.PackageInfos[internalDep].PackageJSONPath) + if err := fs.EnsureDir(targetDir); err != nil { + return ch.LogError("failed to create folder %v for %v: %w", targetDir, internalDep, err) + } + if err := fs.RecursiveCopy(ctx.PackageInfos[internalDep].Dir, targetDir, fs.DirPermissions); err != nil { + return ch.LogError("failed to copy %v into %v: %w", internalDep, targetDir, err) + } + if err := fs.EnsureDir(jsonDir); err != nil { + return ch.LogError("failed to create folder %v for %v: %w", jsonDir, internalDep, err) + } + if err := fs.RecursiveCopy(ctx.PackageInfos[internalDep].PackageJSONPath, jsonDir, fs.DirPermissions); err != nil { + return ch.LogError("failed to copy %v into %v: %w", internalDep, jsonDir, err) + } + } else { + targetDir := filepath.Join(opts.cwd, "out", ctx.PackageInfos[internalDep].Dir) + if err := fs.EnsureDir(targetDir); err != nil { + return ch.LogError("failed to create folder %v for %v: %w", targetDir, internalDep, err) + } + if err := fs.RecursiveCopy(ctx.PackageInfos[internalDep].Dir, targetDir, fs.DirPermissions); err != nil { + return ch.LogError("failed to copy %v into %v: %w", internalDep, targetDir, err) + } + } + + for k, v := range ctx.PackageInfos[internalDep].SubLockfile { + lockfile[k] = v + } + + logger.Printf(" - Added %v", ctx.PackageInfos[internalDep].Name) + } + ch.Config.Logger.Trace("new workspaces", "value", workspaces) + if opts.docker { + if fs.FileExists(".gitignore") { + if err := fs.CopyFile(".gitignore", filepath.Join(opts.cwd, "out", "full", ".gitignore"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root .gitignore: %w", err) + } + } + // We only need to actually copy turbo.json into "full" folder since it isn't needed for installation in docker + if fs.FileExists("turbo.json") { + if err := fs.CopyFile("turbo.json", filepath.Join(opts.cwd, "out", "full", "turbo.json"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root turbo.json: %w", err) + } + } + + if err := fs.CopyFile("package.json", filepath.Join(opts.cwd, "out", "full", "package.json"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root package.json: %w", err) + } + + if err := fs.CopyFile("package.json", filepath.Join(opts.cwd, "out", "json", "package.json"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root package.json: %w", err) + } + } else { + if fs.FileExists(".gitignore") { + if err := fs.CopyFile(".gitignore", filepath.Join(opts.cwd, "out", ".gitignore"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root .gitignore: %w", err) + } + } + + if fs.FileExists("turbo.json") { + if err := fs.CopyFile("turbo.json", filepath.Join(opts.cwd, "out", "turbo.json"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root turbo.json: %w", err) + } + } + + if err := fs.CopyFile("package.json", filepath.Join(opts.cwd, "out", "package.json"), fs.DirPermissions); err != nil { + return ch.LogError("failed to copy root package.json: %w", err) + } + } + + var b bytes.Buffer + yamlEncoder := yaml.NewEncoder(&b) + yamlEncoder.SetIndent(2) // this is what you're looking for + 
err = yamlEncoder.Encode(lockfile) + if err != nil { + return ch.LogError("failed to materialize sub-lockfile. This can happen if your lockfile contains merge conflicts or is somehow corrupted. Please report this if it occurs: %w", err) + } + err = ioutil.WriteFile(filepath.Join(opts.cwd, "out", "yarn.lock"), b.Bytes(), fs.DirPermissions) + if err != nil { + return ch.LogError("failed to write sub-lockfile: %w", err) + } + + tmpGeneratedLockfile, err := os.Create(filepath.Join(opts.cwd, "out", "yarn-tmp.lock")) + if err != nil { + return ch.LogError("failed to create temporary lockfile: %w", err) + } + tmpGeneratedLockfileWriter := bufio.NewWriter(tmpGeneratedLockfile) + + if ctx.Backend.Name == "nodejs-yarn" { + tmpGeneratedLockfileWriter.WriteString("# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.\n# yarn lockfile v1\n\n") + } else { + tmpGeneratedLockfileWriter.WriteString("# This file is generated by running \"yarn install\" inside your project.\n# Manual changes might be lost - proceed with caution!\n\n__metadata:\nversion: 5\ncacheKey: 8\n\n") + } + + // because of yarn being yarn, we need to inject lines in between each block of YAML to make it "valid" SYML + generatedLockfile, err := os.Open(filepath.Join(opts.cwd, "out", "yarn.lock")) + if err != nil { + return ch.LogError("failed to massage lockfile: %w", err) + } + + scan := bufio.NewScanner(generatedLockfile) + buf := make([]byte, 0, 1024*1024) + scan.Buffer(buf, 10*1024*1024) + for scan.Scan() { + line := scan.Text() + if !strings.HasPrefix(line, " ") { + tmpGeneratedLockfileWriter.WriteString(fmt.Sprintf("\n%v\n", strings.ReplaceAll(line, "'", "\""))) + } else { + tmpGeneratedLockfileWriter.WriteString(fmt.Sprintf("%v\n", strings.ReplaceAll(line, "'", "\""))) + } + } + // Make sure to flush the buffered writer before we rename the temporary lockfile into place. 
+ tmpGeneratedLockfileWriter.Flush() + + // Close the files before we rename them + tmpGeneratedLockfile.Close() + generatedLockfile.Close() + + // Rename the file + err = os.Rename(filepath.Join(opts.cwd, "out", "yarn-tmp.lock"), filepath.Join(opts.cwd, "out", "yarn.lock")) + if err != nil { + return ch.LogError("failed to finalize lockfile: %w", err) + } + + return nil + }, + } + + path, err := os.Getwd() + if err != nil { + return nil + } + + cmd.Flags().StringVar(&opts.scope, "scope", "", "package to act as entry point for pruned monorepo") + cmd.Flags().BoolVarP(&opts.docker, "docker", "d", false, "output pruned workspace into 'full' and 'json' directories optimized for Docker layer caching") + cmd.Flags().StringVar(&opts.cwd, "cwd", path, "directory to execute command in") + + cmd.MarkFlagRequired("scope") + + return cmd +} diff --git a/cli/internal/cmd/root.go b/cli/internal/cmd/root.go new file mode 100644 index 0000000000000..adfc4d91c5d11 --- /dev/null +++ b/cli/internal/cmd/root.go @@ -0,0 +1,114 @@ +package cmd + +import ( + "errors" + "os" + "strings" + "time" + + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/cmd/auth" + "github.com/vercel/turborepo/cli/internal/cmd/info" + "github.com/vercel/turborepo/cli/internal/cmd/prune" + "github.com/vercel/turborepo/cli/internal/cmd/run" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/config" + "github.com/vercel/turborepo/cli/internal/logger" + "github.com/vercel/turborepo/cli/internal/process" +) + +var rootCmd = &cobra.Command{ + Use: "turbo []", + Short: "Turborepo is a very fast JavaScript build tool", + Long: `The High-performance Build System for JavaScript & TypeScript Codebases. +Complete documentation is available at https://turborepo.com.`, +} + +func Execute(version string, processes *process.Manager) int { + logger := logger.New() + + err := runCmd(logger, version, processes) + if err == nil { + return 0 + } + + logger.Printf(err.Error()) + + var cmdErr *cmdutil.Error + if errors.As(err, &cmdErr) { + return cmdErr.ExitCode + } + + return 1 +} + +func runCmd(logger *logger.Logger, version string, processes *process.Manager) error { + rootCmd.SilenceUsage = true + rootCmd.SilenceErrors = true + rootCmd.CompletionOptions.DisableDefaultCmd = true + + rootCmd.Version = version + rootCmd.SetVersionTemplate(`{{printf "%s" .Version}} +`) + + cfg, err := config.New(logger, version) + if err != nil { + return err + } + + rootCmd.PersistentFlags().CountVarP(&cfg.Level, "level", "l", "set log level") + rootCmd.PersistentFlags().BoolVar(&cfg.NoColor, "no-color", false, "disable color output") + rootCmd.PersistentFlags().StringVar(&cfg.Token, "token", cfg.Token, "vercel token") + rootCmd.PersistentFlags().StringVar(&cfg.TeamSlug, "team", cfg.TeamSlug, "vercel team slug") + rootCmd.PersistentFlags().StringVar(&cfg.ApiUrl, "api", cfg.ApiUrl, "vercel api url") + rootCmd.PersistentFlags().StringVar(&cfg.LoginUrl, "url", cfg.LoginUrl, "vercel login url") + rootCmd.PersistentFlags().BoolVar(&cfg.NoGC, "no-gc", false, "") + rootCmd.PersistentFlags().StringVar(&cfg.Heap, "heap", "", "outputs the heap trace to the given file") + rootCmd.PersistentFlags().StringVar(&cfg.Trace, "trace", "", "outputs the cpu trace to the given file") + rootCmd.PersistentFlags().StringVar(&cfg.CpuProfile, "cpu-profile", "", "outputs the cpu profile to the given file") + + rootCmd.PersistentFlags().Lookup("token").DefValue = "" + rootCmd.PersistentFlags().Lookup("no-gc").Hidden = true + + ch := 
&cmdutil.Helper{ + Logger: logger, + Config: cfg, + Processes: processes, + } + + rootCmd.PersistentPreRunE = ch.PreRun() + + runCmd := run.RunCmd(ch) + pruneCmd := prune.PruneCmd(ch) + if runCmd == nil || pruneCmd == nil { + return ch.Logger.Errorf("could not determine cwd") + } + + rootCmd.AddCommand(info.BinCmd(ch)) + rootCmd.AddCommand(auth.LinkCmd(ch)) + rootCmd.AddCommand(auth.UnlinkCmd(ch)) + rootCmd.AddCommand(auth.LoginCmd(ch)) + rootCmd.AddCommand(auth.LogoutCmd(ch)) + rootCmd.AddCommand(runCmd) + rootCmd.AddCommand(pruneCmd) + + cpuProfile := false + for _, arg := range os.Args { + if strings.Contains(arg, "cpu-profile") { + cpuProfile = true + break + } + } + if cpuProfile { + // The CPU profiler in Go only runs at 100 Hz, which is far too slow to + // return useful information for esbuild, since it's so fast. Let's keep + // running for 30 seconds straight, which should give us 3,000 samples. + seconds := 30.0 + start := time.Now() + for time.Since(start).Seconds() < seconds { + rootCmd.Execute() + } + } + + return rootCmd.Execute() +} diff --git a/cli/internal/cmd/run/run.go b/cli/internal/cmd/run/run.go new file mode 100644 index 0000000000000..32853a2d02cfe --- /dev/null +++ b/cli/internal/cmd/run/run.go @@ -0,0 +1,942 @@ +package run + +import ( + "bufio" + gocontext "context" + "encoding/json" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "sort" + "strings" + "sync" + "text/tabwriter" + "time" + + "github.com/fatih/color" + "github.com/hashicorp/go-hclog" + "github.com/pkg/errors" + "github.com/pyr-sh/dag" + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/analytics" + "github.com/vercel/turborepo/cli/internal/api" + "github.com/vercel/turborepo/cli/internal/cache" + "github.com/vercel/turborepo/cli/internal/cmdutil" + "github.com/vercel/turborepo/cli/internal/context" + "github.com/vercel/turborepo/cli/internal/core" + "github.com/vercel/turborepo/cli/internal/fs" + "github.com/vercel/turborepo/cli/internal/globby" + "github.com/vercel/turborepo/cli/internal/logger" + "github.com/vercel/turborepo/cli/internal/logstreamer" + "github.com/vercel/turborepo/cli/internal/process" + "github.com/vercel/turborepo/cli/internal/run" + "github.com/vercel/turborepo/cli/internal/scm" + "github.com/vercel/turborepo/cli/internal/scope" + "github.com/vercel/turborepo/cli/internal/ui" + "github.com/vercel/turborepo/cli/internal/util" + "github.com/vercel/turborepo/cli/internal/util/browser" +) + +const ( + TOPOLOGICAL_PIPELINE_DELIMITER = "^" + ENV_PIPELINE_DELIMITER = "$" + FullLogs = "full" + HashLogs = "hash" + NoLogs = "none" +) + +// completeGraph represents the common state inferred from the filesystem and pipeline. +// It is not intended to include information specific to a particular run. +type completeGraph struct { + TopologicalGraph dag.AcyclicGraph + Pipeline map[string]fs.Pipeline + SCC [][]dag.Vertex + PackageInfos map[interface{}]*fs.PackageJSON + GlobalHash string + RootNode string +} + +// runSpec contains the run-specific configuration elements that come from a particular +// invocation of turbo. 
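+// It bundles the resolved target task names, the filtered set of packages, and the parsed run options.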
+type runSpec struct { + Targets []string + FilteredPkgs util.Set + Opts *run.RunOptions +} + +type hashedTask struct { + TaskID string `json:"taskId"` + Task string `json:"task"` + Package string `json:"package"` + Hash string `json:"hash"` + Command string `json:"command"` + Outputs []string `json:"outputs"` + LogFile string `json:"logFile"` + Dir string `json:"directory"` + Dependencies []string `json:"dependencies"` + Dependents []string `json:"dependents"` +} + +type execContext struct { + colorCache *run.ColorCache + runState *run.RunState + rs *runSpec + logReplayWaitGroup sync.WaitGroup + logger *logger.ConcurrentLogger + turboCache cache.Cache + hlogger hclog.Logger + backend *api.LanguageBackend + processes *process.Manager +} + +type packageTask struct { + taskID string + task string + packageName string + pkg *fs.PackageJSON + pipeline *fs.Pipeline +} + +func (rs *runSpec) ArgsForTask(task string) []string { + passThroughArgs := make([]string, 0, len(rs.Opts.PassThroughArgs)) + for _, target := range rs.Targets { + if target == task { + passThroughArgs = append(passThroughArgs, rs.Opts.PassThroughArgs...) + } + } + return passThroughArgs +} + +func (e *execContext) logError(prefix, format string, args ...interface{}) { + err := fmt.Errorf(format, args...) + e.hlogger.Error(prefix, "error", err) + e.logger.Printf("%v", fmt.Errorf("%s%s", prefix, color.RedString(" %v", err))) +} + +func RunCmd(ch *cmdutil.Helper) *cobra.Command { + passThroughArgs := strings.SplitN(strings.Join(os.Args, " "), " -- ", 2) + opts := &run.RunOptions{ + Bail: true, + IncludeDependents: true, + } + + var runOpts struct { + concurrency string + } + + cmd := &cobra.Command{ + Use: "run", + Short: "Run tasks across projects in your monorepo", + Long: `Run tasks across projects in your monorepo. + +By default, turbo executes tasks in topological order (i.e. +dependencies first) and then caches the results. Re-running commands for +tasks already in the cache will skip re-execution and immediately move +artifacts from the cache into the correct output folders (as if the task +occurred again). +`, + Args: cobra.MinimumNArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + // Force streaming output in CI/CD non-interactive mode + if !logger.IsTTY || logger.IsCI { + opts.Stream = true + } + // We can only set this cache folder after we know actual cwd + opts.CacheDir = filepath.Join(opts.Cwd, opts.CacheDir) + if !opts.Graph { + opts.DotGraph = "" + } else { + if opts.DotGraph == "" { + opts.DotGraph = fmt.Sprintf("graph-%v.jpg", time.Now().UnixNano()) + } + } + if opts.DryRunType != "" { + opts.DryRun = true + } + if len(opts.Scope) != 0 && opts.Since != "" && !opts.IncludeDeps { + opts.IncludeDeps = true + } + if len(passThroughArgs) == 2 { + opts.PassThroughArgs = strings.Split(passThroughArgs[1], " ") + } + if opts.OutputLogs != "" { + switch opts.OutputLogs { + case "full": + opts.CacheMissLogsMode = FullLogs + opts.CacheHitLogsMode = FullLogs + case "none": + opts.CacheMissLogsMode = NoLogs + opts.CacheHitLogsMode = NoLogs + case "hash-only": + opts.CacheMissLogsMode = HashLogs + opts.CacheHitLogsMode = HashLogs + case "new-only": + opts.CacheMissLogsMode = FullLogs + opts.CacheHitLogsMode = HashLogs + default: + ch.LogWarning("unknown value %v for --output-logs CLI flag. 
Falling back to full", opts.OutputLogs) + } + } + if concurrency, err := util.ParseConcurrency(runOpts.concurrency); err != nil { + return err + } else { + opts.Concurrency = concurrency + } + + startAt := time.Now() + log.SetFlags(0) + + ch.Config.Cache.Dir = opts.CacheDir + + ctx, err := context.New(context.WithGraph(opts.Cwd, ch.Config)) + if err != nil { + return ch.LogError("%w", err) + } + targets, err := getTargetsFromArguments(args, ctx.TurboConfig) + if err != nil { + return ch.LogError("failed to resolve targets: %w", err) + } + + scmInstance, err := scm.FromInRepo(opts.Cwd) + if err != nil { + if errors.Is(err, scm.ErrFallback) { + ch.LogWarning("%w", err) + } else { + return ch.LogError("failed to create SCM: %w", err) + } + } + + filteredPkgs, err := scope.ResolvePackages(opts.ScopeOpts(), scmInstance, ctx, ch.Logger, ch.Config.Logger) + if err != nil { + ch.LogError("failed resolve packages to run %v", err) + } + + ch.Config.Logger.Debug("global hash", "value", ctx.GlobalHash) + ch.Config.Logger.Debug("local cache folder", "path", opts.CacheDir) + fs.EnsureDir(opts.CacheDir) + + // TODO: consolidate some of these arguments + g := &completeGraph{ + TopologicalGraph: ctx.TopologicalGraph, + Pipeline: ctx.TurboConfig.Pipeline, + SCC: ctx.SCC, + PackageInfos: ctx.PackageInfos, + GlobalHash: ctx.GlobalHash, + RootNode: ctx.RootNode, + } + rs := &runSpec{ + Targets: targets, + FilteredPkgs: filteredPkgs, + Opts: opts, + } + backend := ctx.Backend + return runOperation(ch, g, rs, backend, startAt) + }, + } + + path, err := os.Getwd() + if err != nil { + return nil + } + + cmd.Flags().StringArrayVar(&opts.Scope, "scope", []string{}, "package(s) to act as entry points for task execution, supports globs") + cmd.Flags().StringVar(&opts.CacheDir, "cache-dir", filepath.FromSlash("./node_modules/.cache/turbo"), "Specify local filesystem cache directory") + cmd.Flags().StringVar(&runOpts.concurrency, "concurrency", "10", "concurrency of task execution") + cmd.Flags().BoolVar(&opts.ShouldContinue, "continue", false, "continue execution even if a task exits with an error or non-zero exit code") + cmd.Flags().BoolVarP(&opts.Force, "force", "f", false, "ignore the existing cache") + cmd.Flags().StringVar(&opts.Profile, "profile", "", "file to write turbo's performance profile output into") + cmd.Flags().BoolVarP(&opts.Graph, "graph", "g", false, "generate a Dot graph of the task execution") + cmd.Flags().StringVar(&opts.DotGraph, "graph-path", "", "path for Dot graph") + cmd.Flags().StringArrayVar(&opts.GlobalDeps, "global-deps", []string{}, "glob of global filesystem dependencies to be hashed") + cmd.Flags().StringVar(&opts.Since, "since", "", "limit/set scope to changed packages since a mergebase") + cmd.Flags().StringArrayVar(&opts.Ignore, "ignore", []string{}, "files to ignore when calculating changed files, supports globs") + cmd.Flags().BoolVarP(&opts.Parallel, "parallel", "p", false, "execute all tasks in parallel") + cmd.Flags().BoolVar(&opts.IncludeDeps, "include-deps", false, "include the dependencies of tasks in execution") + cmd.Flags().BoolVar(&opts.NoDeps, "no-deps", false, "exclude dependent task consumers from execution") + cmd.Flags().BoolVar(&opts.NoCache, "no-cache", false, "avoid saving task results to the cache") + cmd.Flags().StringVar(&opts.DryRunType, "dry-run", "", "don't actually run tasks") + cmd.Flags().StringVar(&opts.Cwd, "cwd", path, "directory to execute command in") + cmd.Flags().StringVar(&opts.OutputLogs, "output-logs", "full", "set type of process output 
logging") + cmd.Flags().BoolVar(&opts.Stream, "stream", true, "stream???") + cmd.Flags().BoolVar(&opts.Only, "only", true, "only???") + + cmd.Flags().MarkHidden("stream") + cmd.Flags().MarkHidden("only") + + return cmd +} + +func runOperation(ch *cmdutil.Helper, g *completeGraph, rs *runSpec, backend *api.LanguageBackend, startAt time.Time) error { + var topoVisit []interface{} + for _, node := range g.SCC { + v := node[0] + if v == g.RootNode { + continue + } + topoVisit = append(topoVisit, v) + pack := g.PackageInfos[v] + + ancestralHashes := make([]string, 0, len(pack.InternalDeps)) + if len(pack.InternalDeps) > 0 { + for _, ancestor := range pack.InternalDeps { + if h, ok := g.PackageInfos[ancestor]; ok { + ancestralHashes = append(ancestralHashes, h.Hash) + } + } + sort.Strings(ancestralHashes) + } + var hashable = struct { + hashOfFiles string + ancestralHashes []string + externalDepsHash string + globalHash string + }{hashOfFiles: pack.FilesHash, ancestralHashes: ancestralHashes, externalDepsHash: pack.ExternalDepsHash, globalHash: g.GlobalHash} + + var err error + pack.Hash, err = fs.HashObject(hashable) + if err != nil { + ch.LogError("%v: error computing combined hash: %v", pack.Name, err) + } + ch.Config.Logger.Debug(fmt.Sprintf("%v: package ancestralHash", pack.Name), "hash", ancestralHashes) + ch.Config.Logger.Debug(fmt.Sprintf("%v: package hash", pack.Name), "hash", pack.Hash) + } + + ch.Config.Logger.Debug("topological sort order", "value", topoVisit) + + vertexSet := make(util.Set) + for _, v := range g.TopologicalGraph.Vertices() { + vertexSet.Add(v) + } + // We remove nodes that aren't in the final filter set + for _, toRemove := range vertexSet.Difference(rs.FilteredPkgs) { + if toRemove != g.RootNode { + g.TopologicalGraph.Remove(toRemove) + } + } + + // If we are running in parallel, then we remove all the edges in the graph + // except for the root + if rs.Opts.Parallel { + for _, edge := range g.TopologicalGraph.Edges() { + if edge.Target() != g.RootNode { + g.TopologicalGraph.RemoveEdge(edge) + } + } + } + + engine, err := buildTaskGraph(&g.TopologicalGraph, g.Pipeline, rs) + if err != nil { + return ch.LogError("preparing engine: %s", err) + } + + if rs.Opts.DotGraph != "" { + err := generateDotGraph(ch, engine.TaskGraph, filepath.Join(rs.Opts.Cwd, rs.Opts.DotGraph)) + if err != nil { + return ch.LogError(err.Error()) + } + } else if rs.Opts.DryRun { + tasksRun, err := executeDryRun(ch, engine, g, rs, ch.Config.Logger) + if err != nil { + return ch.LogError(err.Error()) + } + + packagesInScope := rs.FilteredPkgs.UnsafeListOfStrings() + sort.Strings(packagesInScope) + if rs.Opts.DryRunType == "json" { + dryRun := &struct { + Packages []string `json:"packages"` + Tasks []hashedTask `json:"tasks"` + }{ + Packages: packagesInScope, + Tasks: tasksRun, + } + + bytes, err := json.MarshalIndent(dryRun, "", " ") + if err != nil { + return ch.LogError("failed to render to JSON: %w", err) + } + + ch.Logger.Printf(string(bytes)) + } else { + ch.Logger.Printf("") + ch.Logger.Printf("${CYAN}${BOLD}Packages in Scope${RESET}") + + p := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(p, "Name\tPath\t") + for _, pkg := range packagesInScope { + fmt.Fprintf(p, "%s\t%s\t", pkg, g.PackageInfos[pkg].Dir) + } + p.Flush() + + ch.Logger.Printf("") + ch.Logger.Printf("${CYAN}${BOLD}Tasks to Run${RESET}") + + for _, task := range tasksRun { + ch.Logger.Printf("${BOLD}%s${RESET}", task.TaskID) + w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) + fmt.Fprintln(w, 
util.Sprintf(" ${GREY}Task\t=\t%s\t${RESET}", task.Task)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Package\t=\t%s\t${RESET}", task.Package)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Hash\t=\t%s\t${RESET}", task.Hash)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Directory\t=\t%s\t${RESET}", task.Dir)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Command\t=\t%s\t${RESET}", task.Command)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Outputs\t=\t%s\t${RESET}", strings.Join(task.Outputs, ", "))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Log File\t=\t%s\t${RESET}", task.LogFile)) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Dependencies\t=\t%s\t${RESET}", strings.Join(task.Dependencies, ", "))) + fmt.Fprintln(w, util.Sprintf(" ${GREY}Dependendents\t=\t%s\t${RESET}", strings.Join(task.Dependents, ", "))) + w.Flush() + } + } + } else { + packagesInScope := rs.FilteredPkgs.UnsafeListOfStrings() + sort.Strings(packagesInScope) + + ch.Logger.Printf(ui.Dim("• Packages in scope: %v"), strings.Join(packagesInScope, ", ")) + if rs.Opts.Stream { + ch.Logger.Printf("%s %s %s", ui.Dim("• Running"), ui.Dim(ui.Bold(strings.Join(rs.Targets, ", "))), ui.Dim(fmt.Sprintf("in %v packages", rs.FilteredPkgs.Len()))) + } + + return executeTasks(ch, g, rs, engine, backend, startAt) + } + + return nil +} + +func buildTaskGraph(topoGraph *dag.AcyclicGraph, pipeline map[string]fs.Pipeline, rs *runSpec) (*core.Scheduler, error) { + engine := core.NewScheduler(topoGraph) + for taskName, value := range pipeline { + topoDeps := make(util.Set) + deps := make(util.Set) + if util.IsPackageTask(taskName) { + for _, from := range value.DependsOn { + if strings.HasPrefix(from, ENV_PIPELINE_DELIMITER) { + continue + } + if util.IsPackageTask(from) { + engine.AddDep(from, taskName) + continue + } else if strings.Contains(from, TOPOLOGICAL_PIPELINE_DELIMITER) { + topoDeps.Add(from[1:]) + } else { + deps.Add(from) + } + } + _, id := util.GetPackageTaskFromId(taskName) + taskName = id + } else { + for _, from := range value.DependsOn { + if strings.HasPrefix(from, ENV_PIPELINE_DELIMITER) { + continue + } + if strings.Contains(from, TOPOLOGICAL_PIPELINE_DELIMITER) { + topoDeps.Add(from[1:]) + } else { + deps.Add(from) + } + } + } + + engine.AddTask(&core.Task{ + Name: taskName, + TopoDeps: topoDeps, + Deps: deps, + }) + } + + if err := engine.Prepare(&core.SchedulerExecutionOptions{ + Packages: rs.FilteredPkgs.UnsafeListOfStrings(), + TaskNames: rs.Targets, + TasksOnly: rs.Opts.Only, + }); err != nil { + return nil, err + } + return engine, nil +} + +func hasGraphViz() bool { + err := exec.Command("dot", "-v").Run() + return err == nil +} + +func executeTasks(ch *cmdutil.Helper, g *completeGraph, rs *runSpec, engine *core.Scheduler, backend *api.LanguageBackend, startAt time.Time) error { + goctx := gocontext.Background() + + var analyticsSink analytics.Sink + if ch.Config.IsAuthenticated() { + analyticsSink = ch.Config.ApiClient + } else { + analyticsSink = analytics.NullSink + } + + analyticsClient := analytics.NewClient(goctx, analyticsSink, ch.Config.Logger.Named("analytics")) + defer analyticsClient.CloseWithTimeout(50 * time.Millisecond) + + turboCache := cache.New(ch.Config, analyticsClient) + defer turboCache.Shutdown() + + clogger := logger.NewConcurrent(ch.Logger) + runState := run.NewRunState(rs.Opts, startAt) + runState.Listen(ch.Logger, time.Now()) + ec := &execContext{ + colorCache: run.NewColorCache(), + runState: runState, + rs: rs, + logger: clogger, + turboCache: turboCache, + hlogger: ch.Config.Logger, + backend: backend, + processes: 
ch.Processes, + } + + // run the thing + errs := engine.Execute(g.getPackageTaskVisitor(ec.exec), core.ExecOpts{ + Parallel: rs.Opts.Parallel, + Concurrency: rs.Opts.Concurrency, + }) + + // Track if we saw any child with a non-zero exit code + exitCode := 0 + exitCodeErr := &process.ChildExit{} + for _, err := range errs { + if errors.As(err, &exitCodeErr) { + if exitCodeErr.ExitCode > exitCode { + exitCode = exitCodeErr.ExitCode + } + } + ch.Logger.Printf("%v", ch.Logger.Errorf(err.Error())) + } + + ec.logReplayWaitGroup.Wait() + + if err := runState.Close(ch.Logger, rs.Opts.Profile); err != nil { + return &cmdutil.Error{ + ExitCode: exitCode, + Err: ch.Logger.Errorf("error with profiler: %s", err.Error()), + } + } + + if exitCode != 0 { + return &cmdutil.Error{ + ExitCode: exitCode, + } + } + + return nil +} + +func executeDryRun(ch *cmdutil.Helper, engine *core.Scheduler, g *completeGraph, rs *runSpec, logger hclog.Logger) ([]hashedTask, error) { + taskIDs := []hashedTask{} + errs := engine.Execute(g.getPackageTaskVisitor(func(pt *packageTask) error { + command, ok := pt.pkg.Scripts[pt.task] + if !ok { + logger.Debug("no task in package, skipping") + logger.Debug("done", "status", "skipped") + return nil + } + passThroughArgs := rs.ArgsForTask(pt.task) + hash, err := pt.hash(passThroughArgs, logger) + if err != nil { + return err + } + ancestors, err := engine.TaskGraph.Ancestors(pt.taskID) + if err != nil { + return err + } + stringAncestors := []string{} + for _, dep := range ancestors { + // Don't leak out internal ROOT_NODE_NAME nodes, which are just placeholders + if !strings.Contains(dep.(string), core.ROOT_NODE_NAME) { + stringAncestors = append(stringAncestors, dep.(string)) + } + } + descendents, err := engine.TaskGraph.Descendents(pt.taskID) + if err != nil { + return err + } + stringDescendents := []string{} + for _, dep := range descendents { + // Don't leak out internal ROOT_NODE_NAME nodes, which are just placeholders + if !strings.Contains(dep.(string), core.ROOT_NODE_NAME) { + stringDescendents = append(stringDescendents, dep.(string)) + } + } + sort.Strings(stringDescendents) + + taskIDs = append(taskIDs, hashedTask{ + TaskID: pt.taskID, + Task: pt.task, + Package: pt.packageName, + Hash: hash, + Command: command, + Dir: pt.pkg.Dir, + Outputs: pt.ExternalOutputs(), + LogFile: pt.RepoRelativeLogFile(), + Dependencies: stringAncestors, + Dependents: stringDescendents, + }) + return nil + }), core.ExecOpts{ + Concurrency: 1, + Parallel: false, + }) + if len(errs) > 0 { + for _, err := range errs { + ch.Logger.Errorf(err.Error()) + } + return nil, errors.New("errors occurred during dry-run graph traversal") + } + return taskIDs, nil +} + +// Replay logs will try to replay logs back to the stdout +func replayLogs(logger hclog.Logger, cLogger *logger.ConcurrentLogger, runOptions *run.RunOptions, logFileName, hash string, wg *sync.WaitGroup, silent bool, outputLogsMode string) { + defer wg.Done() + logger.Debug("start replaying logs") + f, err := os.Open(filepath.Join(runOptions.Cwd, logFileName)) + if err != nil && !silent { + cLogger.Printf("%v", cLogger.Warnf("error reading logs: %v", err)) + logger.Error(fmt.Sprintf("error reading logs: %v", err.Error())) + } + defer f.Close() + if outputLogsMode != NoLogs { + scan := bufio.NewScanner(f) + if outputLogsMode == HashLogs { + //Writing to Stdout only the "cache hit, replaying output" line + scan.Scan() + cLogger.Printf(ui.StripAnsi(string(scan.Bytes()))) + } else { + for scan.Scan() { + 
cLogger.Printf(ui.StripAnsi(string(scan.Bytes()))) //Writing to Stdout + } + } + } + logger.Debug("finish replaying logs") +} + +// GetTargetsFromArguments returns a list of targets from the arguments and Turbo config. +// Return targets are always unique sorted alphabetically. +func getTargetsFromArguments(arguments []string, configJson *fs.TurboConfigJSON) ([]string, error) { + targets := make(util.Set) + for _, arg := range arguments { + if arg == "--" { + break + } + if !strings.HasPrefix(arg, "-") { + targets.Add(arg) + found := false + for task := range configJson.Pipeline { + if task == arg { + found = true + } + } + if !found { + return nil, fmt.Errorf("task `%v` not found in turbo pipeline in package.json. Are you sure you added it?", arg) + } + } + } + stringTargets := targets.UnsafeListOfStrings() + sort.Strings(stringTargets) + return stringTargets, nil +} + +func (e *execContext) exec(pt *packageTask) error { + cmdTime := time.Now() + + targetHlogger := e.hlogger.Named(fmt.Sprintf("%v:%v", pt.pkg.Name, pt.task)) + targetHlogger.Debug("start") + + // bail if the script doesn't exist + if _, ok := pt.pkg.Scripts[pt.task]; !ok { + targetHlogger.Debug("no task in package, skipping") + targetHlogger.Debug("done", "status", "skipped", "duration", time.Since(cmdTime)) + return nil + } + + // Setup tracer + tracer := e.runState.Run(util.GetTaskId(pt.pkg.Name, pt.task)) + + // Create a logger + pref := e.colorCache.PrefixColor(pt.pkg.Name) + actualPrefix := pref("%s:%s: ", pt.pkg.Name, pt.task) + targetLogger := logger.NewPrefixed(actualPrefix, actualPrefix, actualPrefix, actualPrefix) + + logFileName := filepath.Join(pt.pkg.Dir, ".turbo", fmt.Sprintf("turbo-%v.log", pt.task)) + targetHlogger.Debug("log file", "path", filepath.Join(e.rs.Opts.Cwd, logFileName)) + + passThroughArgs := e.rs.ArgsForTask(pt.task) + hash, err := pt.hash(passThroughArgs, e.hlogger) + e.hlogger.Debug("task hash", "value", hash) + if err != nil { + targetLogger.Printf("%v", targetLogger.Errorf("hashing error: %v", err)) + // @TODO probably should abort fatally??? + } + // Cache --------------------------------------------- + var hit bool + if !e.rs.Opts.Force { + hit, _, _, err = e.turboCache.Fetch(e.rs.Opts.Cwd, hash, nil) + if err != nil { + targetLogger.Printf("%v", targetLogger.Errorf(fmt.Sprintf("error fetching from cache: %s", err))) + } else if hit { + if e.rs.Opts.Stream && fs.FileExists(filepath.Join(e.rs.Opts.Cwd, logFileName)) { + e.logReplayWaitGroup.Add(1) + go replayLogs(targetHlogger, e.logger, e.rs.Opts, logFileName, hash, &e.logReplayWaitGroup, false, e.rs.Opts.CacheHitLogsMode) + } + targetHlogger.Debug("done", "status", "complete", "duration", time.Since(cmdTime)) + tracer(run.TargetCached, nil) + + return nil + } + if e.rs.Opts.Stream && e.rs.Opts.CacheHitLogsMode != NoLogs { + targetLogger.Output("cache miss, executing %s", ui.Dim(hash)) + } + } else { + if e.rs.Opts.Stream && e.rs.Opts.CacheHitLogsMode != NoLogs { + targetLogger.Output("cache bypass, force executing %s", ui.Dim(hash)) + } + } + + // Setup command execution + argsactual := append([]string{"run"}, pt.task) + argsactual = append(argsactual, passThroughArgs...) + // @TODO: @jaredpalmer fix this hack to get the package manager's name + var cmd *exec.Cmd + if e.backend.Name == "nodejs-berry" { + cmd = exec.Command("yarn", argsactual...) + } else { + cmd = exec.Command(strings.TrimPrefix(e.backend.Name, "nodejs-"), argsactual...) 
+ } + cmd.Dir = pt.pkg.Dir + envs := fmt.Sprintf("TURBO_HASH=%v", hash) + cmd.Env = append(os.Environ(), envs) + + // Setup stdout/stderr + // If we are not caching anything, then we don't need to write logs to disk + // be careful about this conditional given the default of cache = true + var writer io.Writer + if e.rs.Opts.NoCache || (pt.pipeline.Cache != nil && !*pt.pipeline.Cache) { + writer = os.Stdout + } else { + // Setup log file + if err := fs.EnsureDir(logFileName); err != nil { + tracer(run.TargetBuildFailed, err) + e.logError(actualPrefix, "%w", err) + if e.rs.Opts.Bail { + os.Exit(1) + } + } + output, err := os.Create(logFileName) + if err != nil { + tracer(run.TargetBuildFailed, err) + e.logError(actualPrefix, "%w", err) + if e.rs.Opts.Bail { + os.Exit(1) + } + } + defer output.Close() + bufWriter := bufio.NewWriter(output) + bufWriter.WriteString(fmt.Sprintf("%scache hit, replaying output %s\n", actualPrefix, ui.Dim(hash))) + defer bufWriter.Flush() + if e.rs.Opts.CacheMissLogsMode == NoLogs || e.rs.Opts.CacheMissLogsMode == HashLogs { + // only write to log file, not to stdout + writer = bufWriter + } else { + writer = io.MultiWriter(os.Stdout, bufWriter) + } + } + + logger := log.New(writer, "", 0) + // Setup a streamer that we'll pipe cmd.Stdout to + logStreamerOut := logstreamer.NewLogstreamer(logger, actualPrefix, false) + // Setup a streamer that we'll pipe cmd.Stderr to. + logStreamerErr := logstreamer.NewLogstreamer(logger, actualPrefix, false) + cmd.Stderr = logStreamerErr + cmd.Stdout = logStreamerOut + // Flush/Reset any error we recorded + logStreamerErr.FlushRecord() + logStreamerOut.FlushRecord() + + // Run the command + if err := e.processes.Exec(cmd); err != nil { + // if we already know we're in the process of exiting, + // we don't need to record an error to that effect. 
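+ // (process.ErrClosing is assumed here to be the sentinel the process manager returns once it is already shutting down, e.g. after a signal, so there is nothing further to report.)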
+ if errors.Is(err, process.ErrClosing) { + return nil + } + tracer(run.TargetBuildFailed, err) + targetHlogger.Error("Error: command finished with error: %w", err) + if e.rs.Opts.Bail { + if e.rs.Opts.Stream { + targetLogger.Printf("%s", fmt.Errorf("%sError: command finished with error: %w", actualPrefix, err).Error()) + } else { + f, err := os.Open(filepath.Join(e.rs.Opts.Cwd, logFileName)) + if err != nil { + targetLogger.Printf("%v", targetLogger.Warnf("failed reading logs: %v", err)) + } + defer f.Close() + scan := bufio.NewScanner(f) + targetLogger.Printf("%v", targetLogger.Errorf("")) + targetLogger.Printf("%v", targetLogger.Errorf("%s ${RED}%s finished with error${RESET}", ui.ERROR_PREFIX, util.GetTaskId(pt.pkg.Name, pt.task))) + targetLogger.Printf("%v", targetLogger.Errorf("")) + for scan.Scan() { + e.logger.Printf("${RED}%s:%s: ${RESET}%s", pt.pkg.Name, pt.task, scan.Bytes()) // Writing to Stdout + } + } + e.processes.Close() + } else { + if e.rs.Opts.Stream { + targetLogger.Printf("%v", targetLogger.Warnf("command finished with error, but continuing...")) + } + } + return err + } + + // Cache command outputs + if !e.rs.Opts.NoCache && (pt.pipeline.Cache == nil || *pt.pipeline.Cache) { + outputs := pt.HashableOutputs() + targetHlogger.Debug("caching output", "outputs", outputs) + ignore := []string{} + filesToBeCached := globby.GlobFiles(pt.pkg.Dir, outputs, ignore) + if err := e.turboCache.Put(pt.pkg.Dir, hash, int(time.Since(cmdTime).Milliseconds()), filesToBeCached); err != nil { + e.logError("", "error caching output: %w", err) + } + } + + // Clean up tracing + tracer(run.TargetBuilt, nil) + targetHlogger.Debug("done", "status", "complete", "duration", time.Since(cmdTime)) + return nil +} + +func generateDotGraph(ch *cmdutil.Helper, taskGraph *dag.AcyclicGraph, outputFilename string) error { + graphString := string(taskGraph.Dot(&dag.DotOpts{ + Verbose: true, + DrawCycles: true, + })) + ext := filepath.Ext(outputFilename) + if ext == ".html" { + f, err := os.Create(outputFilename) + if err != nil { + return fmt.Errorf("error writing graph: %w", err) + } + defer f.Close() + f.WriteString(` + + + + Graph + + + + + + + `) + ch.Logger.Printf("") + ch.Logger.Printf(fmt.Sprintf("✔ Generated task graph in %s", ui.Bold(outputFilename))) + if ui.IsTTY { + browser.OpenBrowser(outputFilename) + } + return nil + } + hasDot := hasGraphViz() + if hasDot { + dotArgs := []string{"-T" + ext[1:], "-o", outputFilename} + cmd := exec.Command("dot", dotArgs...) 
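+ // Pipe the Dot source to Graphviz via stdin; the -T flag above derives the output format from the requested file extension.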
+ cmd.Stdin = strings.NewReader(graphString) + if err := cmd.Run(); err != nil { + return fmt.Errorf("could not generate task graphfile %v: %w", outputFilename, err) + } else { + ch.Logger.Printf("") + ch.Logger.Printf("✔ Generated task graph in %s", ui.Bold(outputFilename)) + } + } else { + ch.Logger.Printf("") + ch.Logger.Printf("%v", ch.LogWarning("`turbo` uses Graphviz to generate an image of your\ngraph, but Graphviz isn't installed on this machine.\n\nYou can download Graphviz from https://graphviz.org/download.\n\nIn the meantime, you can use this string output with an\nonline Dot graph viewer.")) + ch.Logger.Printf("") + ch.Logger.Printf(graphString) + } + return nil +} + +func (pt *packageTask) ExternalOutputs() []string { + if pt.pipeline.Outputs == nil { + return []string{"dist/**/*", "build/**/*"} + } + return pt.pipeline.Outputs +} + +func (pt *packageTask) RepoRelativeLogFile() string { + return filepath.Join(pt.pkg.Dir, ".turbo", fmt.Sprintf("turbo-%v.log", pt.task)) +} + +func (pt *packageTask) HashableOutputs() []string { + outputs := []string{fmt.Sprintf(".turbo/turbo-%v.log", pt.task)} + outputs = append(outputs, pt.ExternalOutputs()...) + return outputs +} + +func (pt *packageTask) hash(args []string, logger hclog.Logger) (string, error) { + // Hash --------------------------------------------- + outputs := pt.HashableOutputs() + logger.Debug("task output globs", "outputs", outputs) + + // Hash the task-specific environment variables found in the dependsOnKey in the pipeline + var hashableEnvVars []string + var hashableEnvPairs []string + if len(pt.pipeline.DependsOn) > 0 { + for _, v := range pt.pipeline.DependsOn { + if strings.Contains(v, ENV_PIPELINE_DELIMITER) { + trimmed := strings.TrimPrefix(v, ENV_PIPELINE_DELIMITER) + hashableEnvPairs = append(hashableEnvPairs, fmt.Sprintf("%v=%v", trimmed, os.Getenv(trimmed))) + hashableEnvVars = append(hashableEnvVars, trimmed) + } + } + sort.Strings(hashableEnvVars) // always sort them + } + logger.Debug("hashable env vars", "vars", hashableEnvVars) + hashable := struct { + Hash string + Task string + Outputs []string + PassThruArgs []string + HashableEnvPairs []string + }{ + Hash: pt.pkg.Hash, + Task: pt.task, + Outputs: outputs, + PassThruArgs: args, + HashableEnvPairs: hashableEnvPairs, + } + return fs.HashObject(hashable) +} + +func (g *completeGraph) getPackageTaskVisitor(visitor func(pt *packageTask) error) func(taskID string) error { + return func(taskID string) error { + name, task := util.GetPackageTaskFromId(taskID) + pkg := g.PackageInfos[name] + // first check for package-tasks + pipeline, ok := g.Pipeline[fmt.Sprintf("%v", taskID)] + if !ok { + // then check for regular tasks + altpipe, notcool := g.Pipeline[task] + // if neither, then bail + if !notcool && !ok { + return nil + } + // override if we need to... 
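+ // (no package-specific pipeline entry exists for this task ID, so fall back to the task-level entry)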
+ pipeline = altpipe + } + return visitor(&packageTask{ + taskID: taskID, + task: task, + packageName: name, + pkg: pkg, + pipeline: &pipeline, + }) + } +} diff --git a/cli/internal/cmd/run/run_test.go b/cli/internal/cmd/run/run_test.go new file mode 100644 index 0000000000000..c393ff8bcde83 --- /dev/null +++ b/cli/internal/cmd/run/run_test.go @@ -0,0 +1,286 @@ +package run + +import ( +// "fmt" +// "os" +// "path/filepath" +// "reflect" +// "testing" + +// "github.com/stretchr/testify/assert" +// "github.com/vercel/turborepo/cli/internal/fs" +// "github.com/vercel/turborepo/cli/internal/run" +) + +// func TestParseConfig(t *testing.T) { +// defaultCwd, err := os.Getwd() +// if err != nil { +// t.Errorf("failed to get cwd: %v", err) +// } +// defaultCacheFolder := filepath.Join(defaultCwd, filepath.FromSlash("node_modules/.cache/turbo")) +// cases := []struct { +// Name string +// Args []string +// Expected *run.RunOptions +// }{ +// { +// "string flags", +// []string{"foo"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "", +// concurrency: 10, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// { +// "cwd", +// []string{"foo", "--cwd=zop"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "", +// concurrency: 10, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// cwd: "zop", +// cacheFolder: filepath.FromSlash("zop/node_modules/.cache/turbo"), +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// { +// "scope", +// []string{"foo", "--scope=foo", "--scope=blah"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "", +// concurrency: 10, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// scope: []string{"foo", "blah"}, +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// { +// "concurrency", +// []string{"foo", "--concurrency=12"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "", +// concurrency: 12, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// { +// "graph", +// []string{"foo", "--graph=g.png"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "g.png", +// concurrency: 10, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// { +// "passThroughArgs", +// []string{"foo", "--graph=g.png", "--", "--boop", "zoop"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "g.png", +// concurrency: 10, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// passThroughArgs: []string{"--boop", "zoop"}, +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, 
+// }, +// }, +// { +// "Empty passThroughArgs", +// []string{"foo", "--graph=g.png", "--"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// dotGraph: "g.png", +// concurrency: 10, +// includeDependencies: false, +// cache: true, +// forceExecution: false, +// profile: "", +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// passThroughArgs: []string{}, +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// { +// "since and scope imply including dependencies for backwards compatibility", +// []string{"foo", "--scope=bar", "--since=some-ref"}, +// &run.RunOptions{ +// includeDependents: true, +// stream: true, +// bail: true, +// concurrency: 10, +// includeDependencies: true, +// cache: true, +// cwd: defaultCwd, +// cacheFolder: defaultCacheFolder, +// scope: []string{"bar"}, +// since: "some-ref", +// cacheHitLogsMode: FullLogs, +// cacheMissLogsMode: FullLogs, +// }, +// }, +// } + +// ui := &cli.BasicUi{ +// Reader: os.Stdin, +// Writer: os.Stdout, +// ErrorWriter: os.Stderr, +// } + +// for i, tc := range cases { +// t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { + +// actual, err := parseRunArgs(tc.Args, ui) +// if err != nil { +// t.Fatalf("invalid parse: %#v", err) +// } +// assert.EqualValues(t, tc.Expected, actual)) +// }) +// } +// } + +// func TestGetTargetsFromArguments(t *testing.T) { +// type args struct { +// arguments []string +// configJson *fs.TurboConfigJSON +// } +// tests := []struct { +// name string +// args args +// want []string +// wantErr bool +// }{ +// { +// name: "handles one defined target", +// args: args{ +// arguments: []string{"build"}, +// configJson: &fs.TurboConfigJSON{ +// Pipeline: map[string]fs.Pipeline{ +// "build": {}, +// "test": {}, +// "thing#test": {}, +// }, +// }, +// }, +// want: []string{"build"}, +// wantErr: false, +// }, +// { +// name: "handles multiple targets and ignores flags", +// args: args{ +// arguments: []string{"build", "test", "--foo", "--bar"}, +// configJson: &fs.TurboConfigJSON{ +// Pipeline: map[string]fs.Pipeline{ +// "build": {}, +// "test": {}, +// "thing#test": {}, +// }, +// }, +// }, +// want: []string{"build", "test"}, +// wantErr: false, +// }, +// { +// name: "handles pass through arguments after -- ", +// args: args{ +// arguments: []string{"build", "test", "--", "--foo", "build", "--cache-dir"}, +// configJson: &fs.TurboConfigJSON{ +// Pipeline: map[string]fs.Pipeline{ +// "build": {}, +// "test": {}, +// "thing#test": {}, +// }, +// }, +// }, +// want: []string{"build", "test"}, +// wantErr: false, +// }, +// { +// name: "handles unknown pipeline targets ", +// args: args{ +// arguments: []string{"foo", "test", "--", "--foo", "build", "--cache-dir"}, +// configJson: &fs.TurboConfigJSON{ +// Pipeline: map[string]fs.Pipeline{ +// "build": {}, +// "test": {}, +// "thing#test": {}, +// }, +// }, +// }, +// want: nil, +// wantErr: true, +// }, +// } + +// for _, tt := range tests { +// t.Run(tt.name, func(t *testing.T) { +// got, err := getTargetsFromArguments(tt.args.arguments, tt.args.configJson) +// if (err != nil) != tt.wantErr { +// t.Errorf("GetTargetsFromArguments() error = %v, wantErr %v", err, tt.wantErr) +// return +// } + +// if !reflect.DeepEqual(got, tt.want) { +// t.Errorf("GetTargetsFromArguments() = %v, want %v", got, tt.want) +// } +// }) +// } +// } diff --git a/cli/internal/cmdutil/cmdutil.go b/cli/internal/cmdutil/cmdutil.go new file mode 100644 index 0000000000000..9871dd0b82ae7 --- /dev/null +++ 
b/cli/internal/cmdutil/cmdutil.go @@ -0,0 +1,142 @@ +package cmdutil + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "runtime/debug" + + "github.com/hashicorp/go-hclog" + "github.com/spf13/cobra" + "github.com/vercel/turborepo/cli/internal/client" + "github.com/vercel/turborepo/cli/internal/config" + tlogger "github.com/vercel/turborepo/cli/internal/logger" + "github.com/vercel/turborepo/cli/internal/process" +) + +const ( + // EnvLogLevel is the environment log level + EnvLogLevel = "TURBO_LOG_LEVEL" +) + +type Helper struct { + Config *config.Config + Logger *tlogger.Logger + Processes *process.Manager +} + +func (h *Helper) LogWarning(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) + h.Config.Logger.Warn("warning", err) + return h.Logger.Errorf(err.Error()) +} + +func (h *Helper) LogError(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) + h.Config.Logger.Error("error", err) + return h.Logger.Errorf(err.Error()) +} + +func (h *Helper) PreRun() func(cmd *cobra.Command, args []string) error { + return func(cmd *cobra.Command, args []string) error { + // To view a CPU trace, use "go tool trace [file]". Note that the trace + // viewer doesn't work under Windows Subsystem for Linux for some reason. + if h.Config.Trace != "" { + if done := createTraceFile(args, h.Config.Trace); done == nil { + os.Exit(1) + } else { + defer done() + } + } + + // To view a heap trace, use "go tool pprof [file]" and type "top". You can + // also drop it into https://speedscope.app and use the "left heavy" or + // "sandwich" view modes. + if h.Config.Heap != "" { + if done := createHeapFile(args, h.Config.Heap); done == nil { + os.Exit(1) + } else { + defer done() + } + } + + // To view a CPU profile, drop the file into https://speedscope.app. + // Note: Running the CPU profiler doesn't work under Windows subsystem for + // Linux. The profiler has to be built for native Windows and run using the + // command prompt instead. + if h.Config.CpuProfile != "" { + if done := createCpuprofileFile(args, h.Config.CpuProfile); done == nil { + os.Exit(1) + } else { + defer done() + } + } + + if h.Config.CpuProfile == "" { + // Disable the GC since we're just going to allocate a bunch of memory + // and then exit anyway. This speedup is not insignificant. Make sure to + // only do this here once we know that we're not going to be a long-lived + // process though. + if !h.Config.NoGC { + debug.SetGCPercent(-1) + } + } + + if !h.Config.NoColor { + os.Setenv("FORCE_COLOR", "1") + } + + // Determine our log level if we have any. First override we check if env var + level := hclog.NoLevel + if v := os.Getenv(EnvLogLevel); v != "" { + level = hclog.LevelFromString(v) + if level == hclog.NoLevel { + return h.Logger.Errorf("%s value %q is not a valid log level", EnvLogLevel, v) + } + } + + // Process arguments looking for `-v` flags to control the log level. + // This overrides whatever the env var set. + switch { + case h.Config.Level == 1: + if level == hclog.NoLevel || level > hclog.Info { + level = hclog.Info + } + case h.Config.Level == 2: + if level == hclog.NoLevel || level > hclog.Debug { + level = hclog.Debug + } + case h.Config.Level == 3: + if level == hclog.NoLevel || level > hclog.Trace { + level = hclog.Trace + } + default: + } + + // Default output is nowhere unless we enable logging. 
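+ // (output goes to stderr only when a level was selected via --level/-l or TURBO_LOG_LEVEL; otherwise logs are discarded)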
+ var output io.Writer = ioutil.Discard + color := hclog.ColorOff + if level != hclog.NoLevel { + output = os.Stderr + color = hclog.AutoColor + } + + hlogger := hclog.New(&hclog.LoggerOptions{ + Name: cmd.Name(), + Level: level, + Color: color, + Output: output, + }) + + maxRemoteFailCount := 3 + apiClient := client.NewClient(h.Config.ApiUrl, hlogger, h.Config.Version, h.Config.TeamId, h.Config.TeamSlug, uint64(maxRemoteFailCount)) + + h.Config.Logger = hlogger + h.Config.ApiClient = apiClient + + h.Config.ApiClient.SetToken(h.Config.Token) + + return nil + } +} diff --git a/cli/internal/cmdutil/errors.go b/cli/internal/cmdutil/errors.go new file mode 100644 index 0000000000000..8fd45e0d8f452 --- /dev/null +++ b/cli/internal/cmdutil/errors.go @@ -0,0 +1,8 @@ +package cmdutil + +type Error struct { + ExitCode int + Err error +} + +func (e *Error) Error() string { return e.Err.Error() } diff --git a/cli/cmd/turbo/main_utils.go b/cli/internal/cmdutil/utils.go similarity index 98% rename from cli/cmd/turbo/main_utils.go rename to cli/internal/cmdutil/utils.go index 2d2b9a2c3ba82..58b40f73d3c77 100644 --- a/cli/cmd/turbo/main_utils.go +++ b/cli/internal/cmdutil/utils.go @@ -1,4 +1,4 @@ -package main +package cmdutil import ( "fmt" diff --git a/cli/internal/config/config.go b/cli/internal/config/config.go index 49ee6b696f6b8..a2be1d3b4724a 100644 --- a/cli/internal/config/config.go +++ b/cli/internal/config/config.go @@ -1,98 +1,53 @@ package config import ( - "fmt" - "io" - "io/ioutil" - "net/url" "os" "path/filepath" "runtime" - "strings" - "github.com/vercel/turborepo/cli/internal/client" - - hclog "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-hclog" "github.com/kelseyhightower/envconfig" "github.com/mattn/go-isatty" - "github.com/mitchellh/cli" -) - -const ( - // EnvLogLevel is the environment log level - EnvLogLevel = "TURBO_LOG_LEVEL" + "github.com/vercel/turborepo/cli/internal/client" + "github.com/vercel/turborepo/cli/internal/logger" ) -// IsCI returns true if running in a CI/CD environment func IsCI() bool { return !isatty.IsTerminal(os.Stdout.Fd()) || os.Getenv("CI") != "" } -// Config is a struct that contains user inputs and our logger type Config struct { - Logger hclog.Logger - // Bearer token - Token string - // vercel.com / remote cache team id - TeamId string - // vercel.com / remote cache team slug - TeamSlug string - // Backend API URL - ApiUrl string - // Login URL - LoginUrl string - // Backend retryable http client + Level int + Token string + TeamId string + TeamSlug string + ApiUrl string + LoginUrl string + Version string + NoColor bool + Logger hclog.Logger ApiClient *client.ApiClient - // Turborepo CLI Version - TurboVersion string - Cache *CacheConfig -} + Cache *CacheConfig -// IsLoggedIn returns true if we have a token and either a team id or team slug -func (c *Config) IsLoggedIn() bool { - return c.Token != "" && (c.TeamId != "" || c.TeamSlug != "") + NoGC bool + Heap string + Trace string + CpuProfile string } -// CacheConfig type CacheConfig struct { - // Number of async workers Workers int - // Cache directory - Dir string + Dir string } -// ParseAndValidate parses the cmd line flags / env vars, and verifies that all required -// flags have been set. Users can pass in flags when calling a subcommand, or set env vars -// with the prefix 'TURBO_'. If both values are set, the env var value will be used. 
-func ParseAndValidate(args []string, ui cli.Ui, turboVersion string) (c *Config, err error) { - - // Special check for ./turbo invocation without any args - // Return the help message - if len(args) == 0 { - args = append(args, "--help") - } - - // Pop the subcommand into 'cmd' - // flags.Parse does not work when the subcommand is included - cmd, inputFlags := args[0], args[1:] - - // Special check for help commands - // command is ./turbo --help or --version - if len(inputFlags) == 0 && (cmd == "help" || cmd == "--help" || cmd == "-help" || cmd == "version" || cmd == "--version" || cmd == "-version") { - return nil, nil - } - // command is ./turbo $subcommand --help - if len(inputFlags) == 1 && (inputFlags[0] == "help" || inputFlags[0] == "--help" || inputFlags[0] == "-help") { - return nil, nil - } - // Precedence is flags > env > config > default +func New(logger *logger.Logger, version string) (*Config, error) { userConfig, _ := ReadUserConfigFile() partialConfig, _ := ReadConfigFile(filepath.Join(".turbo", "config.json")) partialConfig.Token = userConfig.Token enverr := envconfig.Process("TURBO", partialConfig) if enverr != nil { - return nil, fmt.Errorf("invalid environment variable: %w", err) + return nil, logger.Errorf("invalid environment variable: %w", enverr) } if partialConfig.Token == "" && IsCI() { @@ -100,91 +55,22 @@ func ParseAndValidate(args []string, ui cli.Ui, turboVersion string) (c *Config, partialConfig.TeamId = os.Getenv("VERCEL_ARTIFACTS_OWNER") } - app := args[0] - - // Determine our log level if we have any. First override we check if env var - level := hclog.NoLevel - if v := os.Getenv(EnvLogLevel); v != "" { - level = hclog.LevelFromString(v) - if level == hclog.NoLevel { - return nil, fmt.Errorf("%s value %q is not a valid log level", EnvLogLevel, v) - } - } - - // Process arguments looking for `-v` flags to control the log level. - // This overrides whatever the env var set. - for _, arg := range args { - if len(arg) != 0 && arg[0] != '-' { - continue - } - switch { - case arg == "-v": - if level == hclog.NoLevel || level > hclog.Info { - level = hclog.Info - } - case arg == "-vv": - if level == hclog.NoLevel || level > hclog.Debug { - level = hclog.Debug - } - case arg == "-vvv": - if level == hclog.NoLevel || level > hclog.Trace { - level = hclog.Trace - } - case strings.HasPrefix(arg, "--api="): - apiUrl := arg[len("--api="):] - if _, err := url.ParseRequestURI(apiUrl); err != nil { - return nil, fmt.Errorf("%s is an invalid URL", apiUrl) - } - partialConfig.ApiUrl = apiUrl - case strings.HasPrefix(arg, "--url="): - loginUrl := arg[len("--url="):] - if _, err := url.ParseRequestURI(loginUrl); err != nil { - return nil, fmt.Errorf("%s is an invalid URL", loginUrl) - } - partialConfig.LoginUrl = loginUrl - case strings.HasPrefix(arg, "--token="): - partialConfig.Token = arg[len("--token="):] - case strings.HasPrefix(arg, "--team="): - partialConfig.TeamSlug = arg[len("--team="):] - default: - continue - } - } - - // Default output is nowhere unless we enable logging. 
- var output io.Writer = ioutil.Discard - color := hclog.ColorOff - if level != hclog.NoLevel { - output = os.Stderr - color = hclog.AutoColor - } - - logger := hclog.New(&hclog.LoggerOptions{ - Name: app, - Level: level, - Color: color, - Output: output, - }) - - maxRemoteFailCount := 3 - apiClient := client.NewClient(partialConfig.ApiUrl, logger, turboVersion, partialConfig.TeamId, partialConfig.TeamSlug, uint64(maxRemoteFailCount)) - - c = &Config{ - Logger: logger, - Token: partialConfig.Token, - TeamSlug: partialConfig.TeamSlug, - TeamId: partialConfig.TeamId, - ApiUrl: partialConfig.ApiUrl, - LoginUrl: partialConfig.LoginUrl, - ApiClient: apiClient, - TurboVersion: turboVersion, + cfg := &Config{ + Token: partialConfig.Token, + TeamSlug: partialConfig.TeamSlug, + TeamId: partialConfig.TeamId, + ApiUrl: partialConfig.ApiUrl, + LoginUrl: partialConfig.LoginUrl, + Version: version, Cache: &CacheConfig{ Workers: runtime.NumCPU() + 2, Dir: filepath.Join("node_modules", ".cache", "turbo"), }, } - c.ApiClient.SetToken(partialConfig.Token) + return cfg, nil +} - return c, nil +func (c *Config) IsAuthenticated() bool { + return c.Token != "" && (c.TeamId != "" || c.TeamSlug != "") } diff --git a/cli/internal/context/context.go b/cli/internal/context/context.go index 875da6c6bb197..5d0771014265e 100644 --- a/cli/internal/context/context.go +++ b/cli/internal/context/context.go @@ -9,7 +9,11 @@ import ( "strings" "sync" + "github.com/Masterminds/semver" + "github.com/deckarep/golang-set" "github.com/hashicorp/go-hclog" + "github.com/pyr-sh/dag" + gitignore "github.com/sabhiram/go-gitignore" "github.com/vercel/turborepo/cli/internal/api" "github.com/vercel/turborepo/cli/internal/backends" "github.com/vercel/turborepo/cli/internal/config" @@ -17,11 +21,6 @@ import ( "github.com/vercel/turborepo/cli/internal/fs" "github.com/vercel/turborepo/cli/internal/globby" "github.com/vercel/turborepo/cli/internal/util" - - "github.com/Masterminds/semver" - mapset "github.com/deckarep/golang-set" - "github.com/pyr-sh/dag" - gitignore "github.com/sabhiram/go-gitignore" "golang.org/x/sync/errgroup" ) @@ -145,7 +144,7 @@ func WithGraph(rootpath string, config *config.Config) Option { if !fs.FileExists(turboJSONPath) { if rootPackageJSON.LegacyTurboConfig == nil { // TODO: suggestion on how to create one - return fmt.Errorf("Could not find turbo.json. Follow directions at https://turborepo.org/docs/getting-started to create one") + return fmt.Errorf("could not find turbo.json. Follow directions at https://turborepo.org/docs/getting-started to create one") } else { log.Println("[WARNING] Turbo configuration now lives in \"turbo.json\". Migrate to turbo.json by running \"npx @turbo/codemod create-turbo-config\"") c.TurboConfig = rootPackageJSON.LegacyTurboConfig @@ -183,12 +182,14 @@ func WithGraph(rootpath string, config *config.Config) Option { c.RootPackageInfo = rootPackageJSON spaces, err := c.Backend.GetWorkspaceGlobs(rootpath) - if err != nil { return fmt.Errorf("could not detect workspaces: %w", err) } globalHash, err := calculateGlobalHash(rootpath, rootPackageJSON, c.TurboConfig.GlobalDependencies, c.Backend, config.Logger, os.Environ()) + if err != nil { + return fmt.Errorf("couldn't calculate global hash: %w", err) + } c.GlobalHash = globalHash // We will parse all package.json's simultaneously. 
We use a // wait group because we cannot fully populate the graph (the next step) @@ -459,7 +460,6 @@ func (c *Context) resolveDepGraph(wg *sync.WaitGroup, unresolvedDirectDeps map[s if len(entry.OptionalDependencies) > 0 { c.resolveDepGraph(wg, entry.OptionalDependencies, resolvedDepsSet, seen, pkg) } - }(directDepName, unresolvedVersion) } } diff --git a/cli/internal/core/scheduler.go b/cli/internal/core/scheduler.go index bb1e9e8282767..a1a020d481677 100644 --- a/cli/internal/core/scheduler.go +++ b/cli/internal/core/scheduler.go @@ -4,9 +4,8 @@ import ( "fmt" "strings" - "github.com/vercel/turborepo/cli/internal/util" - "github.com/pyr-sh/dag" + "github.com/vercel/turborepo/cli/internal/util" ) const ROOT_NODE_NAME = "___ROOT___" diff --git a/cli/internal/core/scheduler_test.go b/cli/internal/core/scheduler_test.go index 0245667c4fa5c..0360c8d1874d9 100644 --- a/cli/internal/core/scheduler_test.go +++ b/cli/internal/core/scheduler_test.go @@ -5,9 +5,8 @@ import ( "strings" "testing" - "github.com/vercel/turborepo/cli/internal/util" - "github.com/pyr-sh/dag" + "github.com/vercel/turborepo/cli/internal/util" ) func testVisitor(taskID string) error { diff --git a/cli/internal/fs/hash.go b/cli/internal/fs/hash.go index a5c0c6aa01100..4bec012a4c13d 100644 --- a/cli/internal/fs/hash.go +++ b/cli/internal/fs/hash.go @@ -7,6 +7,7 @@ import ( "io" "os" "strconv" + "github.com/vercel/turborepo/cli/internal/xxhash" ) diff --git a/cli/internal/fs/package_deps_hash.go b/cli/internal/fs/package_deps_hash.go index a8c75fa109019..0ffdd0a7021a1 100644 --- a/cli/internal/fs/package_deps_hash.go +++ b/cli/internal/fs/package_deps_hash.go @@ -139,7 +139,6 @@ func UnescapeChars(in []byte) []byte { // gitLsTree executes "git ls-tree" in a folder func gitLsTree(path string, gitPath string) (string, error) { - cmd := exec.Command("git", "ls-tree", "HEAD", "-r") cmd.Dir = path out, err := cmd.CombinedOutput() diff --git a/cli/internal/globby/globby.go b/cli/internal/globby/globby.go index a3b17913c64af..71382fe9bdc09 100644 --- a/cli/internal/globby/globby.go +++ b/cli/internal/globby/globby.go @@ -1,12 +1,11 @@ package globby import ( - "github.com/vercel/turborepo/cli/internal/fs" - "path/filepath" "strings" "github.com/bmatcuk/doublestar/v4" + "github.com/vercel/turborepo/cli/internal/fs" ) func GlobFiles(basePath string, includePatterns []string, excludePatterns []string) []string { diff --git a/cli/internal/info/bin.go b/cli/internal/info/bin.go deleted file mode 100644 index a9b4f2673e836..0000000000000 --- a/cli/internal/info/bin.go +++ /dev/null @@ -1,54 +0,0 @@ -package info - -import ( - "fmt" - "os" - "strings" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/ui" - - "github.com/fatih/color" - "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" -) - -type BinCommand struct { - Config *config.Config - Ui *cli.ColoredUi -} - -// Synopsis of run command -func (c *BinCommand) Synopsis() string { - return "Get the path to the Turbo binary" -} - -// Help returns information about the `bin` command -func (c *BinCommand) Help() string { - helpText := ` -Usage: turbo bin - - Get the path to the Turbo binary -` - return strings.TrimSpace(helpText) -} - -func (c *BinCommand) Run(args []string) int { - path, err := os.Executable() - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could not get path to turbo binary: %w", err)) - return 1 - } - c.Ui.Output(path) - return 0 -} - -// logError logs an error and outputs it to the UI. 
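// Hedged aside: the context code above parses every workspace package.json
// concurrently before wiring up the dependency graph. A generic, standalone
// sketch of that fan-out pattern with golang.org/x/sync/errgroup follows;
// the file paths and the pkgJSON struct are made up for illustration.
package main

import (
	"encoding/json"
	"fmt"
	"os"
	"sync"

	"golang.org/x/sync/errgroup"
)

type pkgJSON struct {
	Name string `json:"name"`
}

func main() {
	paths := []string{"packages/a/package.json", "packages/b/package.json"}

	var mu sync.Mutex
	parsed := map[string]*pkgJSON{}

	var g errgroup.Group
	for _, p := range paths {
		p := p // capture the loop variable for the goroutine (pre-Go 1.22)
		g.Go(func() error {
			data, err := os.ReadFile(p)
			if err != nil {
				return fmt.Errorf("reading %s: %w", p, err)
			}
			var pj pkgJSON
			if err := json.Unmarshal(data, &pj); err != nil {
				return fmt.Errorf("parsing %s: %w", p, err)
			}
			mu.Lock()
			parsed[pj.Name] = &pj
			mu.Unlock()
			return nil
		})
	}
	// Wait returns the first error from any goroutine, mirroring how a bad
	// package.json fails the whole graph construction.
	if err := g.Wait(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("parsed %d packages\n", len(parsed))
}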
-func (c *BinCommand) logError(log hclog.Logger, prefix string, err error) { - log.Error(prefix, "error", err) - - if prefix != "" { - prefix += ": " - } - - c.Ui.Error(fmt.Sprintf("%s%s%s", ui.ERROR_PREFIX, prefix, color.RedString(" %v", err))) -} diff --git a/cli/internal/logger/concurrent.go b/cli/internal/logger/concurrent.go new file mode 100644 index 0000000000000..2aa41b106f9fd --- /dev/null +++ b/cli/internal/logger/concurrent.go @@ -0,0 +1,47 @@ +package logger + +import ( + "io" + "os" + "sync" +) + +type ConcurrentLogger struct { + logger *Logger + Out io.Writer + mutex sync.Mutex +} + +func NewConcurrent(logger *Logger) *ConcurrentLogger { + return &ConcurrentLogger{ + Out: os.Stdout, + } +} + +func (l *ConcurrentLogger) Printf(format string, args ...interface{}) { + l.mutex.Lock() + defer l.mutex.Unlock() + + l.logger.Printf(format, args...) +} + +func (l *ConcurrentLogger) Sucessf(format string, args ...interface{}) string { + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.logger.Sucessf(format, args...) +} + +func (l *ConcurrentLogger) Warnf(format string, args ...interface{}) error { + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.logger.Warnf(format, args...) +} + +func (l *ConcurrentLogger) Errorf(format string, args ...interface{}) error { + l.mutex.Lock() + defer l.mutex.Unlock() + + return l.logger.Errorf(format, args...) +} diff --git a/cli/internal/logger/logger.go b/cli/internal/logger/logger.go new file mode 100644 index 0000000000000..5cfd390d6d174 --- /dev/null +++ b/cli/internal/logger/logger.go @@ -0,0 +1,47 @@ +package logger + +import ( + "fmt" + "io" + "os" + + "github.com/fatih/color" + "github.com/mattn/go-isatty" + "github.com/vercel/turborepo/cli/internal/util" +) + +var IsTTY = isatty.IsTerminal(os.Stdout.Fd()) || isatty.IsCygwinTerminal(os.Stdout.Fd()) +var IsCI = os.Getenv("CI") == "true" || os.Getenv("BUILD_NUMBER") == "true" || os.Getenv("TEAMCITY_VERSION") != "" + +var successPrefix = color.New(color.Bold, color.FgGreen, color.ReverseVideo).Sprint(" SUCCESS ") +var warningPrefix = color.New(color.Bold, color.FgYellow, color.ReverseVideo).Sprint(" WARNING ") +var errorPrefix = color.New(color.Bold, color.FgRed, color.ReverseVideo).Sprint(" ERROR ") + +type Logger struct { + Out io.Writer +} + +func New() *Logger { + return &Logger{ + Out: os.Stdout, + } +} + +func (l *Logger) Printf(format string, args ...interface{}) { + fmt.Fprintln(os.Stdout, util.Sprintf(format, args...)) +} + +func (l *Logger) Sucessf(format string, args ...interface{}) string { + msg := fmt.Sprintf(format, args...) + return fmt.Sprintf("%s%s", successPrefix, color.GreenString(" %v", msg)) +} + +func (l *Logger) Warnf(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) + return fmt.Errorf("%s%s", warningPrefix, color.YellowString(" %v", err)) +} + +func (l *Logger) Errorf(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) 
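	// The error built just above is folded into a new error below whose message
	// starts with the reverse-video " ERROR " prefix and renders the original
	// message in red, so callers can print the returned value as-is. Warnf takes
	// the same approach with the warning prefix, and ConcurrentLogger
	// (concurrent.go above) only adds a mutex around these calls so they can be
	// shared across goroutines.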
+ return fmt.Errorf("%s%s", errorPrefix, color.RedString(" %v", err)) +} diff --git a/cli/internal/logger/prefixed.go b/cli/internal/logger/prefixed.go new file mode 100644 index 0000000000000..90221cdb79967 --- /dev/null +++ b/cli/internal/logger/prefixed.go @@ -0,0 +1,54 @@ +package logger + +import ( + "fmt" + "io" + "os" + + "github.com/fatih/color" + "github.com/vercel/turborepo/cli/internal/util" +) + +type PrefixedLogger struct { + Out io.Writer + + outputPrefix string + successPrefix string + warningPrefix string + errorPrefix string +} + +func NewPrefixed(outputPrefix, successPrefix, warningPrefix, errorPrefix string) *PrefixedLogger { + return &PrefixedLogger{ + Out: os.Stdout, + + outputPrefix: outputPrefix, + successPrefix: successPrefix, + warningPrefix: warningPrefix, + errorPrefix: errorPrefix, + } +} + +func (l *PrefixedLogger) Printf(format string, args ...interface{}) { + fmt.Fprintln(l.Out, util.Sprintf(format, args...)) +} + +func (l *PrefixedLogger) Sucessf(format string, args ...interface{}) string { + msg := fmt.Sprintf(format, args...) + return fmt.Sprintf("%s%s%s", successPrefix, l.successPrefix, color.GreenString("%v", msg)) +} + +func (l *PrefixedLogger) Warnf(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) + return fmt.Errorf("%s%s%s", warningPrefix, l.warningPrefix, color.YellowString("%v", err)) +} + +func (l *PrefixedLogger) Errorf(format string, args ...interface{}) error { + err := fmt.Errorf(format, args...) + return fmt.Errorf("%s%s%s", errorPrefix, l.errorPrefix, color.RedString("%v", err)) +} + +func (l *PrefixedLogger) Output(format string, args ...interface{}) { + msg := fmt.Sprintf(format, args...) + l.Printf("%s%s", l.outputPrefix, msg) +} diff --git a/cli/internal/login/link.go b/cli/internal/login/link.go deleted file mode 100644 index 4ec7f8ba91ffe..0000000000000 --- a/cli/internal/login/link.go +++ /dev/null @@ -1,175 +0,0 @@ -package login - -import ( - "fmt" - "os/exec" - "path/filepath" - "strings" - "github.com/vercel/turborepo/cli/internal/client" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/fs" - "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - - "github.com/AlecAivazis/survey/v2" - "github.com/fatih/color" - "github.com/mitchellh/cli" - "github.com/mitchellh/go-homedir" -) - -// LinkCommand is a Command implementation allows the user to link your local directory to a Turbrepo -type LinkCommand struct { - Config *config.Config - Ui *cli.ColoredUi -} - -// Synopsis of link command -func (c *LinkCommand) Synopsis() string { - return "Link your local directory to a Vercel organization and enable remote caching." -} - -// Help returns information about the `link` command -func (c *LinkCommand) Help() string { - helpText := ` -Usage: turbo link - - Link your local directory to a Vercel organization and enable remote caching. - -Options: - --help Show this screen. 
- --no-gitignore Do not create or modify .gitignore - (default false) -` - return strings.TrimSpace(helpText) -} - -// Run links a local directory to a Vercel organization and enables remote caching -func (c *LinkCommand) Run(args []string) int { - var dontModifyGitIgnore bool - shouldSetup := true - dir, homeDirErr := homedir.Dir() - if homeDirErr != nil { - c.logError(fmt.Errorf("could not find home directory.\n%w", homeDirErr)) - return 1 - } - c.Ui.Info(">>> Remote Caching (beta)") - c.Ui.Info("") - c.Ui.Info(" Remote Caching shares your cached Turborepo task outputs and logs across") - c.Ui.Info(" all your team’s Vercel projects. It also can share outputs") - c.Ui.Info(" with other services that enable Remote Caching, like CI/CD systems.") - c.Ui.Info(" This results in faster build times and deployments for your team.") - c.Ui.Info(util.Sprintf(" For more info, see ${UNDERLINE}https://turborepo.org/docs/features/remote-caching${RESET}")) - c.Ui.Info("") - currentDir, fpErr := filepath.Abs(".") - if fpErr != nil { - c.logError(fmt.Errorf("could figure out file path.\n%w", fpErr)) - return 1 - } - - survey.AskOne( - &survey.Confirm{ - Default: true, - Message: util.Sprintf("Would you like to enable Remote Caching for ${CYAN}${BOLD}\"%s\"${RESET}?", strings.Replace(currentDir, dir, "~", 1)), - }, - &shouldSetup, survey.WithValidator(survey.Required), - survey.WithIcons(func(icons *survey.IconSet) { - // for more information on formatting the icons, see here: https://github.com/mgutz/ansi#style-format - icons.Question.Format = "gray+hb" - })) - - if !shouldSetup { - c.Ui.Info("> Canceled.") - return 1 - } - - if c.Config.Token == "" { - c.logError(fmt.Errorf(util.Sprintf("User not found. Please login to Turborepo first by running ${BOLD}`npx turbo login`${RESET}."))) - return 1 - } - - teamsResponse, err := c.Config.ApiClient.GetTeams() - if err != nil { - c.logError(fmt.Errorf("could not get team information.\n%w", err)) - return 1 - } - userResponse, err := c.Config.ApiClient.GetUser() - if err != nil { - c.logError(fmt.Errorf("could not get user information.\n%w", err)) - return 1 - } - - var chosenTeam client.Team - - teamOptions := make([]string, len(teamsResponse.Teams)) - - // Gather team options - for i, team := range teamsResponse.Teams { - teamOptions[i] = team.Name - } - - var chosenTeamName string - nameWithFallback := userResponse.User.Name - if nameWithFallback == "" { - nameWithFallback = userResponse.User.Username - } - survey.AskOne( - &survey.Select{ - Message: "Which Vercel scope (and Remote Cache) do you want associate with this Turborepo? ", - Options: append([]string{nameWithFallback}, teamOptions...), - }, - &chosenTeamName, - survey.WithValidator(survey.Required), - survey.WithIcons(func(icons *survey.IconSet) { - // for more information on formatting the icons, see here: https://github.com/mgutz/ansi#style-format - icons.Question.Format = "gray+hb" - })) - - if chosenTeamName == "" { - c.Ui.Info("Canceled. 
Turborepo not set up.") - return 1 - } else if (chosenTeamName == userResponse.User.Name) || (chosenTeamName == userResponse.User.Username) { - chosenTeam = client.Team{ - ID: userResponse.User.ID, - Name: userResponse.User.Name, - Slug: userResponse.User.Username, - } - } else { - for _, team := range teamsResponse.Teams { - if team.Name == chosenTeamName { - chosenTeam = team - break - } - } - } - fs.EnsureDir(filepath.Join(".turbo", "config.json")) - fsErr := config.WriteRepoConfigFile(&config.TurborepoConfig{ - TeamId: chosenTeam.ID, - ApiUrl: c.Config.ApiUrl, - }) - if fsErr != nil { - c.logError(fmt.Errorf("could not link current directory to team/user.\n%w", fsErr)) - return 1 - } - - if !dontModifyGitIgnore { - fs.EnsureDir(".gitignore") - _, gitIgnoreErr := exec.Command("sh", "-c", "grep -qxF '.turbo' .gitignore || echo '.turbo' >> .gitignore").CombinedOutput() - if err != nil { - c.logError(fmt.Errorf("could find or update .gitignore.\n%w", gitIgnoreErr)) - return 1 - } - } - - c.Ui.Info("") - c.Ui.Info(util.Sprintf("%s${RESET} Turborepo CLI authorized for ${BOLD}%s${RESET}", ui.Rainbow(">>> Success!"), chosenTeam.Name)) - c.Ui.Info("") - c.Ui.Info(util.Sprintf("${GREY}To disable Remote Caching, run `npx turbo unlink`${RESET}")) - c.Ui.Info("") - return 0 -} - -// logError logs an error and outputs it to the UI. -func (c *LinkCommand) logError(err error) { - c.Config.Logger.Error("error", err) - c.Ui.Error(fmt.Sprintf("%s%s", ui.ERROR_PREFIX, color.RedString(" %v", err))) -} diff --git a/cli/internal/login/login.go b/cli/internal/login/login.go deleted file mode 100644 index 0c6db8ed5fc5f..0000000000000 --- a/cli/internal/login/login.go +++ /dev/null @@ -1,330 +0,0 @@ -package login - -import ( - "context" - "fmt" - "net" - "net/http" - "net/url" - "os" - "os/signal" - "strings" - - "github.com/pkg/errors" - "github.com/vercel/turborepo/cli/internal/client" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - "github.com/vercel/turborepo/cli/internal/util/browser" - - "github.com/fatih/color" - "github.com/mitchellh/cli" - "github.com/spf13/cobra" -) - -// LoginCommand is a Command implementation allows the user to login to turbo -type LoginCommand struct { - Config *config.Config - UI *cli.ColoredUi -} - -// Synopsis of run command -func (c *LoginCommand) Synopsis() string { - return "Login to your Vercel account" -} - -// Help returns information about the `run` command. 
Match the cobra output for now, until -// we can wire up cobra for real -func (c *LoginCommand) Help() string { - helpText := ` -Login to your Vercel account - -Usage: - turbo login [flags] - -Flags: - --sso-team string attempt to authenticate to the specified team using SSO -` - return strings.TrimSpace(helpText) -} - -const defaultHostname = "127.0.0.1" -const defaultPort = 9789 -const defaultSSOProvider = "SAML/OIDC Single Sign-On" - -// Run logs into the api with PKCE and writes the token to turbo user config directory -func (c *LoginCommand) Run(args []string) int { - var ssoTeam string - loginCommand := &cobra.Command{ - Use: "turbo login", - Short: "Login to your Vercel account", - RunE: func(cmd *cobra.Command, args []string) error { - deps := loginDeps{ - ui: c.UI, - openURL: browser.OpenBrowser, - client: c.Config.ApiClient, - writeUserConfig: config.WriteUserConfigFile, - writeRepoConfig: config.WriteRepoConfigFile, - } - if ssoTeam != "" { - return loginSSO(c.Config, ssoTeam, deps) - } - return run(c.Config, deps) - }, - } - loginCommand.Flags().StringVar(&ssoTeam, "sso-team", "", "attempt to authenticate to the specified team using SSO") - loginCommand.SetArgs(args) - err := loginCommand.Execute() - if err != nil { - c.Config.Logger.Error("error", err) - c.UI.Error(fmt.Sprintf("%s%s", ui.ERROR_PREFIX, color.RedString(" %v", err))) - return 1 - } - return 0 -} - -type browserClient = func(url string) error -type userClient interface { - SetToken(token string) - GetUser() (*client.UserResponse, error) - VerifySSOToken(token string, tokenName string) (*client.VerifiedSSOUser, error) -} -type configWriter = func(cf *config.TurborepoConfig) error - -type loginDeps struct { - ui *cli.ColoredUi - openURL browserClient - client userClient - writeUserConfig configWriter - writeRepoConfig configWriter -} - -func run(c *config.Config, deps loginDeps) error { - c.Logger.Debug(fmt.Sprintf("turbo v%v", c.TurboVersion)) - c.Logger.Debug(fmt.Sprintf("api url: %v", c.ApiUrl)) - c.Logger.Debug(fmt.Sprintf("login url: %v", c.LoginUrl)) - redirectURL := fmt.Sprintf("http://%v:%v", defaultHostname, defaultPort) - loginURL := fmt.Sprintf("%v/turborepo/token?redirect_uri=%v", c.LoginUrl, redirectURL) - deps.ui.Info(util.Sprintf(">>> Opening browser to %v", c.LoginUrl)) - - rootctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) - defer cancel() - - var query url.Values - oss, err := newOneShotServer(rootctx, func(w http.ResponseWriter, r *http.Request) { - query = r.URL.Query() - http.Redirect(w, r, c.LoginUrl+"/turborepo/success", http.StatusFound) - }, defaultPort) - if err != nil { - return errors.Wrap(err, "failed to start local server") - } - - s := ui.NewSpinner(os.Stdout) - err = deps.openURL(loginURL) - if err != nil { - return errors.Wrapf(err, "failed to open %v", loginURL) - } - s.Start("Waiting for your authorization...") - err = oss.Wait() - if err != nil { - return errors.Wrap(err, "failed to shut down local server") - } - // Stop the spinner before we return to ensure terminal is left in a good state - s.Stop("") - - deps.writeUserConfig(&config.TurborepoConfig{Token: query.Get("token")}) - rawToken := query.Get("token") - deps.client.SetToken(rawToken) - userResponse, err := deps.client.GetUser() - if err != nil { - return errors.Wrap(err, "could not get user information") - } - deps.ui.Info("") - deps.ui.Info(util.Sprintf("%s Turborepo CLI authorized for %s${RESET}", ui.Rainbow(">>> Success!"), userResponse.User.Email)) - deps.ui.Info("") - 
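	// At this point in the flow: the browser was sent to the login URL, the
	// one-shot local server (see newOneShotServer below) captured the ?token=
	// query from the redirect back to 127.0.0.1, the token was persisted via
	// writeUserConfig and set on the API client, and GetUser confirmed it.
	// The remaining ui.Info calls only print the suggested next step.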
deps.ui.Info(util.Sprintf("${CYAN}To connect to your Remote Cache. Run the following in the${RESET}")) - deps.ui.Info(util.Sprintf("${CYAN}root of any turborepo:${RESET}")) - deps.ui.Info("") - deps.ui.Info(util.Sprintf(" ${BOLD}npx turbo link${RESET}")) - deps.ui.Info("") - return nil -} - -func loginSSO(c *config.Config, ssoTeam string, deps loginDeps) error { - redirectURL := fmt.Sprintf("http://%v:%v", defaultHostname, defaultPort) - query := make(url.Values) - query.Add("teamId", ssoTeam) - query.Add("mode", "login") - query.Add("next", redirectURL) - loginURL := fmt.Sprintf("%v/api/auth/sso?%v", c.LoginUrl, query.Encode()) - - rootctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt) - defer cancel() - - var verificationToken string - oss, err := newOneShotServer(rootctx, func(w http.ResponseWriter, r *http.Request) { - token, location := getTokenAndRedirect(r.URL.Query()) - verificationToken = token - http.Redirect(w, r, location, http.StatusFound) - }, defaultPort) - if err != nil { - return errors.Wrap(err, "failed to start local server") - } - s := ui.NewSpinner(os.Stdout) - err = deps.openURL(loginURL) - if err != nil { - return errors.Wrapf(err, "failed to open %v", loginURL) - } - s.Start("Waiting for your authorization...") - err = oss.Wait() - if err != nil { - return errors.Wrap(err, "failed to shut down local server") - } - // Stop the spinner before we return to ensure terminal is left in a good state - s.Stop("") - // open https://vercel.com/api/auth/sso?teamId=&mode=login - if verificationToken == "" { - return errors.New("no token auth token found") - } - - // We now have a verification token. We need to pass it to the verification endpoint - // to get an actual token. - tokenName, err := makeTokenName() - if err != nil { - return errors.Wrap(err, "failed to make sso token name") - } - verifiedUser, err := deps.client.VerifySSOToken(verificationToken, tokenName) - if err != nil { - return errors.Wrap(err, "failed to verify SSO token") - } - - deps.client.SetToken(verifiedUser.Token) - userResponse, err := deps.client.GetUser() - if err != nil { - return errors.Wrap(err, "could not get user information") - } - err = deps.writeUserConfig(&config.TurborepoConfig{Token: verifiedUser.Token}) - if err != nil { - return errors.Wrap(err, "failed to save auth token") - } - deps.ui.Info("") - deps.ui.Info(util.Sprintf("%s Turborepo CLI authorized for %s${RESET}", ui.Rainbow(">>> Success!"), userResponse.User.Email)) - deps.ui.Info("") - if verifiedUser.TeamID != "" { - err = deps.writeRepoConfig(&config.TurborepoConfig{TeamId: verifiedUser.TeamID, ApiUrl: c.ApiUrl}) - if err != nil { - return errors.Wrap(err, "failed to save teamId") - } - } else { - - deps.ui.Info(util.Sprintf("${CYAN}To connect to your Remote Cache. Run the following in the${RESET}")) - deps.ui.Info(util.Sprintf("${CYAN}root of any turborepo:${RESET}")) - deps.ui.Info("") - deps.ui.Info(util.Sprintf(" ${BOLD}npx turbo link${RESET}")) - } - deps.ui.Info("") - return nil -} - -func getTokenAndRedirect(params url.Values) (string, string) { - locationStub := "https://vercel.com/notifications/cli-login-" - if loginError := params.Get("loginError"); loginError != "" { - outParams := make(url.Values) - outParams.Add("loginError", loginError) - return "", locationStub + "failed?" 
+ outParams.Encode() - } - if ssoEmail := params.Get("ssoEmail"); ssoEmail != "" { - outParams := make(url.Values) - outParams.Add("ssoEmail", ssoEmail) - if teamName := params.Get("teamName"); teamName != "" { - outParams.Add("teamName", teamName) - } - if ssoType := params.Get("ssoType"); ssoType != "" { - outParams.Add("ssoType", ssoType) - } - return "", locationStub + "incomplete?" + outParams.Encode() - } - token := params.Get("token") - location := locationStub + "success" - if email := params.Get("email"); email != "" { - outParams := make(url.Values) - outParams.Add("email", email) - location += "?" + outParams.Encode() - } - return token, location -} - -type oneShotServer struct { - Port uint16 - requestDone chan struct{} - serverDone chan struct{} - serverErr error - ctx context.Context - srv *http.Server -} - -func newOneShotServer(ctx context.Context, handler http.HandlerFunc, port uint16) (*oneShotServer, error) { - requestDone := make(chan struct{}) - serverDone := make(chan struct{}) - mux := http.NewServeMux() - srv := &http.Server{Handler: mux} - oss := &oneShotServer{ - Port: port, - requestDone: requestDone, - serverDone: serverDone, - ctx: ctx, - srv: srv, - } - mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - handler(w, r) - close(oss.requestDone) - }) - err := oss.start(handler) - if err != nil { - return nil, err - } - return oss, nil -} - -func (oss *oneShotServer) start(handler http.HandlerFunc) error { - // Start listening immediately to handle race with user interaction - // This is mostly for testing, but would otherwise still technically be - // a race condition. - addr := defaultHostname + ":" + fmt.Sprint(oss.Port) - l, err := net.Listen("tcp", addr) - if err != nil { - return err - } - go func() { - if err := oss.srv.Serve(l); err != nil && !errors.Is(err, http.ErrServerClosed) { - oss.serverErr = errors.Wrap(err, "could not activate device. 
Please try again") - } - close(oss.serverDone) - }() - return nil -} - -func (oss *oneShotServer) Wait() error { - select { - case <-oss.requestDone: - case <-oss.ctx.Done(): - } - return oss.closeServer() -} - -func (oss *oneShotServer) closeServer() error { - err := oss.srv.Shutdown(oss.ctx) - if err != nil { - return err - } - <-oss.serverDone - return oss.serverErr -} - -func makeTokenName() (string, error) { - host, err := os.Hostname() - if err != nil { - return "", err - } - return fmt.Sprintf("Turbo CLI on %v via %v", host, defaultSSOProvider), nil -} diff --git a/cli/internal/login/login_test.go b/cli/internal/login/login_test.go deleted file mode 100644 index 6e434d04e974f..0000000000000 --- a/cli/internal/login/login_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package login - -import ( - "fmt" - "net/http" - "net/url" - "os" - "testing" - - "github.com/hashicorp/go-hclog" - "github.com/vercel/turborepo/cli/internal/client" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/ui" -) - -type dummyClient struct { - setToken string - createdSSOTokenName string -} - -func (d *dummyClient) SetToken(t string) { - d.setToken = t -} - -func (d *dummyClient) GetUser() (*client.UserResponse, error) { - return &client.UserResponse{}, nil -} - -func (d *dummyClient) VerifySSOToken(token string, tokenName string) (*client.VerifiedSSOUser, error) { - d.createdSSOTokenName = tokenName - return &client.VerifiedSSOUser{ - Token: "actual-sso-token", - TeamID: "sso-team-id", - }, nil -} - -var logger = hclog.Default() -var cf = &config.Config{ - Logger: logger, - TurboVersion: "test", - ApiUrl: "api-url", - LoginUrl: "login-url", -} - -type testResult struct { - clientErr error - userConfigWritten *config.TurborepoConfig - repoConfigWritten *config.TurborepoConfig - clientTokenWritten string - openedURL string - stepCh chan struct{} - client dummyClient -} - -func (tr *testResult) Deps() loginDeps { - urlOpener := func(url string) error { - tr.openedURL = url - tr.stepCh <- struct{}{} - return nil - } - return loginDeps{ - ui: ui.Default(), - openURL: urlOpener, - client: &tr.client, - writeUserConfig: func(cf *config.TurborepoConfig) error { - tr.userConfigWritten = cf - return nil - }, - writeRepoConfig: func(cf *config.TurborepoConfig) error { - tr.repoConfigWritten = cf - return nil - }, - } -} - -func newTest(redirectedURL string) *testResult { - stepCh := make(chan struct{}, 1) - tr := &testResult{ - stepCh: stepCh, - } - // When it's time, do the redirect - go func() { - <-tr.stepCh - client := &http.Client{ - CheckRedirect: func(req *http.Request, via []*http.Request) error { - return http.ErrUseLastResponse - }, - } - resp, err := client.Get(redirectedURL) - if err != nil { - tr.clientErr = err - } else if resp != nil && resp.StatusCode != http.StatusFound { - tr.clientErr = fmt.Errorf("invalid status %v", resp.StatusCode) - } - tr.stepCh <- struct{}{} - }() - return tr -} - -func Test_run(t *testing.T) { - test := newTest("http://127.0.0.1:9789/?token=my-token") - err := run(cf, test.Deps()) - if err != nil { - t.Errorf("expected to succeed, got error %v", err) - } - <-test.stepCh - if test.clientErr != nil { - t.Errorf("test client had error %v", test.clientErr) - } - - expectedURL := "login-url/turborepo/token?redirect_uri=http://127.0.0.1:9789" - if test.openedURL != expectedURL { - t.Errorf("openedURL got %v, want %v", test.openedURL, expectedURL) - } - - if test.userConfigWritten.Token != "my-token" { - t.Errorf("config token got %v, want 
my-token", test.userConfigWritten.Token) - } - if test.client.setToken != "my-token" { - t.Errorf("user client token got %v, want my-token", test.client.setToken) - } -} - -func Test_sso(t *testing.T) { - redirectParams := make(url.Values) - redirectParams.Add("token", "verification-token") - redirectParams.Add("email", "test@example.com") - test := newTest("http://127.0.0.1:9789/?" + redirectParams.Encode()) - err := loginSSO(cf, "my-team", test.Deps()) - if err != nil { - t.Errorf("expected to succeed, got error %v", err) - } - <-test.stepCh - if test.clientErr != nil { - t.Errorf("test client had error %v", test.clientErr) - } - host, err := os.Hostname() - if err != nil { - t.Errorf("failed to get hostname %v", err) - } - expectedTokenName := fmt.Sprintf("Turbo CLI on %v via SAML/OIDC Single Sign-On", host) - if test.client.createdSSOTokenName != expectedTokenName { - t.Errorf("created sso token got %v want %v", test.client.createdSSOTokenName, expectedTokenName) - } - expectedToken := "actual-sso-token" - if test.client.setToken != expectedToken { - t.Errorf("user client token got %v, want %v", test.client.setToken, expectedToken) - } - if test.userConfigWritten.Token != expectedToken { - t.Errorf("user config token got %v want %v", test.userConfigWritten.Token, expectedToken) - } - expectedTeamID := "sso-team-id" - if test.repoConfigWritten.TeamId != expectedTeamID { - t.Errorf("repo config team id got %v want %v", test.repoConfigWritten.TeamId, expectedTeamID) - } - if test.repoConfigWritten.Token != "" { - t.Errorf("repo config file token, got %v want empty string", test.repoConfigWritten.Token) - } -} diff --git a/cli/internal/login/logout.go b/cli/internal/login/logout.go deleted file mode 100644 index 2c06fa08c2465..0000000000000 --- a/cli/internal/login/logout.go +++ /dev/null @@ -1,55 +0,0 @@ -package login - -import ( - "fmt" - "strings" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - - "github.com/fatih/color" - "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" -) - -// LogoutCommand is a Command implementation allows the user to login to turbo -type LogoutCommand struct { - Config *config.Config - Ui *cli.ColoredUi -} - -// Synopsis of run command -func (c *LogoutCommand) Synopsis() string { - return "Logout of your Vercel account" -} - -// Help returns information about the `run` command -func (c *LogoutCommand) Help() string { - helpText := ` -Usage: turbo logout - - Logout of your Vercel account -` - return strings.TrimSpace(helpText) -} - -// Run executes tasks in the monorepo -func (c *LogoutCommand) Run(args []string) int { - if err := config.DeleteUserConfigFile(); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could not logout. Something went wrong: %w", err)) - return 1 - } - c.Ui.Info(util.Sprintf("${GREY}>>> Logged out${RESET}")) - return 0 -} - -// logError logs an error and outputs it to the UI. 
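// Hedged aside: both run and loginSSO above block on a "one-shot" local HTTP
// server that exists to receive a single browser redirect. A compact,
// standalone sketch of that technique follows; the handler body and output
// are illustrative, while 127.0.0.1:9789 matches the defaults used above.
package main

import (
	"context"
	"errors"
	"fmt"
	"net"
	"net/http"
)

func main() {
	requestDone := make(chan struct{})
	serverDone := make(chan error, 1)
	var token string

	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		// Capture the query parameter delivered by the browser redirect.
		token = r.URL.Query().Get("token")
		fmt.Fprintln(w, "You can close this window now.")
		close(requestDone)
	})

	srv := &http.Server{Handler: mux}
	l, err := net.Listen("tcp", "127.0.0.1:9789")
	if err != nil {
		panic(err)
	}
	go func() {
		// Serve returns http.ErrServerClosed once Shutdown is called below.
		if err := srv.Serve(l); err != nil && !errors.Is(err, http.ErrServerClosed) {
			serverDone <- err
			return
		}
		serverDone <- nil
	}()

	<-requestDone // the single expected request has been handled
	if err := srv.Shutdown(context.Background()); err != nil {
		panic(err)
	}
	if err := <-serverDone; err != nil {
		panic(err)
	}
	fmt.Println("received token:", token)
}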
-func (c *LogoutCommand) logError(log hclog.Logger, prefix string, err error) { - log.Error(prefix, "error", err) - - if prefix != "" { - prefix += ": " - } - - c.Ui.Error(fmt.Sprintf("%s%s%s", ui.ERROR_PREFIX, prefix, color.RedString(" %v", err))) -} diff --git a/cli/internal/login/unlink.go b/cli/internal/login/unlink.go deleted file mode 100644 index f376821707fac..0000000000000 --- a/cli/internal/login/unlink.go +++ /dev/null @@ -1,56 +0,0 @@ -package login - -import ( - "fmt" - "strings" - - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - - "github.com/fatih/color" - "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" -) - -// UnlinkCommand is a Command implementation allows the user to login to turbo -type UnlinkCommand struct { - Config *config.Config - Ui *cli.ColoredUi -} - -// Synopsis of run command -func (c *UnlinkCommand) Synopsis() string { - return "Unlink the current directory from your Vercel organization and disable Remote Caching (beta)." -} - -// Help returns information about the `run` command -func (c *UnlinkCommand) Help() string { - helpText := ` -Usage: turbo unlink - - Unlink the current directory from your Vercel organization and disable Remote Caching (beta). -` - return strings.TrimSpace(helpText) -} - -// Run executes tasks in the monorepo -func (c *UnlinkCommand) Run(args []string) int { - if err := config.WriteRepoConfigFile(&config.TurborepoConfig{}); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could not unlink. Something went wrong: %w", err)) - return 1 - } - c.Ui.Output(util.Sprintf("${GREY}> Disabled Remote Caching${RESET}")) - return 0 -} - -// logError logs an error and outputs it to the UI. -func (c *UnlinkCommand) logError(log hclog.Logger, prefix string, err error) { - log.Error(prefix, "error", err) - - if prefix != "" { - prefix += ": " - } - - c.Ui.Error(fmt.Sprintf("%s%s%s", ui.ERROR_PREFIX, prefix, color.RedString(" %v", err))) -} diff --git a/cli/internal/prune/prune.go b/cli/internal/prune/prune.go deleted file mode 100644 index 85047fba04aec..0000000000000 --- a/cli/internal/prune/prune.go +++ /dev/null @@ -1,310 +0,0 @@ -package prune - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "log" - "os" - "path/filepath" - "strings" - - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/context" - "github.com/vercel/turborepo/cli/internal/fs" - "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - - "github.com/fatih/color" - "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" - "github.com/pkg/errors" - "gopkg.in/yaml.v3" -) - -// PruneCommand is a Command implementation that tells Turbo to run a task -type PruneCommand struct { - Config *config.Config - Ui *cli.ColoredUi -} - -// Synopsis of run command -func (c *PruneCommand) Synopsis() string { - return "Prepare a subset of your monorepo" -} - -// Help returns information about the `run` command -func (c *PruneCommand) Help() string { - helpText := ` -Usage: turbo prune --scope= - - Prepare a subset of your monorepo. - -Options: - --help Show this screen. - --scope Specify package to act as entry point - for pruned monorepo (required). - --docker Output pruned workspace into 'full' - and 'json' directories optimized for - Docker layer caching. 
(default false) -` - return strings.TrimSpace(helpText) -} - -type PruneOptions struct { - scope string - cwd string - docker bool -} - -func parsePruneArgs(args []string) (*PruneOptions, error) { - var options = &PruneOptions{} - - if len(args) == 0 { - return nil, errors.Errorf("At least one target must be specified.") - } - - cwd, err := os.Getwd() - if err != nil { - return nil, errors.Errorf("invalid working directory") - } - options.cwd = cwd - for _, arg := range args { - if strings.HasPrefix(arg, "--") { - switch { - case strings.HasPrefix(arg, "--scope="): - options.scope = arg[len("--scope="):] - case strings.HasPrefix(arg, "--docker"): - options.docker = true - case strings.HasPrefix(arg, "--cwd="): - if len(arg[len("--cwd="):]) > 1 { - options.cwd = arg[len("--cwd="):] - } - default: - return nil, errors.New(fmt.Sprintf("unknown flag: %v", arg)) - } - } - } - - return options, nil -} - -// Prune creates a smaller monorepo with only the required workspaces -func (c *PruneCommand) Run(args []string) int { - pruneOptions, err := parsePruneArgs(args) - logger := log.New(os.Stdout, "", 0) - if err != nil { - c.logError(c.Config.Logger, "", err) - return 1 - } - ctx, err := context.New(context.WithGraph(pruneOptions.cwd, c.Config)) - - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could not construct graph: %w", err)) - return 1 - } - c.Config.Logger.Trace("scope", "value", pruneOptions.scope) - target, scopeIsValid := ctx.PackageInfos[pruneOptions.scope] - if !scopeIsValid { - c.logError(c.Config.Logger, "", errors.Errorf("invalid scope: package not found")) - return 1 - } - c.Config.Logger.Trace("target", "value", target.Name) - c.Config.Logger.Trace("directory", "value", target.Dir) - c.Config.Logger.Trace("external deps", "value", target.UnresolvedExternalDeps) - c.Config.Logger.Trace("internal deps", "value", target.InternalDeps) - c.Config.Logger.Trace("docker", "value", pruneOptions.docker) - c.Config.Logger.Trace("out dir", "value", filepath.Join(pruneOptions.cwd, "out")) - - if !util.IsYarn(ctx.Backend.Name) { - c.logError(c.Config.Logger, "", fmt.Errorf("this command is not yet implemented for %s", ctx.Backend.Name)) - return 1 - } else if ctx.Backend.Name == "nodejs-berry" { - isNMLinker, err := util.IsNMLinker(pruneOptions.cwd) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could not determine if yarn is using `nodeLinker: node-modules`: %w", err)) - return 1 - } else if !isNMLinker { - c.logError(c.Config.Logger, "", fmt.Errorf("only yarn v2/v3 with `nodeLinker: node-modules` is supported at this time")) - return 1 - } - } - - logger.Printf("Generating pruned monorepo for %v in %v", ui.Bold(pruneOptions.scope), ui.Bold(filepath.Join(pruneOptions.cwd, "out"))) - - err = fs.EnsureDir(filepath.Join(pruneOptions.cwd, "out", "package.json")) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could not create directory: %w", err)) - return 1 - } - workspaces := []string{} - lockfile := ctx.RootPackageInfo.SubLockfile - targets := []interface{}{pruneOptions.scope} - internalDeps, err := ctx.TopologicalGraph.Ancestors(pruneOptions.scope) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("could find traverse the dependency graph to find topological dependencies: %w", err)) - return 1 - } - targets = append(targets, internalDeps.List()...) 
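	// At this point targets holds the --scope package plus every workspace it
	// depends on transitively (its ancestors in the topological graph). The
	// loop below copies each of those workspaces into out/ (or into out/full
	// plus a package.json-only out/json tree when --docker is set) and merges
	// each package's sub-lockfile entries into the pruned lockfile.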
- - for _, internalDep := range targets { - if internalDep == ctx.RootNode { - continue - } - workspaces = append(workspaces, ctx.PackageInfos[internalDep].Dir) - if pruneOptions.docker { - targetDir := filepath.Join(pruneOptions.cwd, "out", "full", ctx.PackageInfos[internalDep].Dir) - jsonDir := filepath.Join(pruneOptions.cwd, "out", "json", ctx.PackageInfos[internalDep].PackageJSONPath) - if err := fs.EnsureDir(targetDir); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to create folder %v for %v: %w", targetDir, internalDep, err)) - return 1 - } - if err := fs.RecursiveCopy(ctx.PackageInfos[internalDep].Dir, targetDir, fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy %v into %v: %w", internalDep, targetDir, err)) - return 1 - } - if err := fs.EnsureDir(jsonDir); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to create folder %v for %v: %w", jsonDir, internalDep, err)) - return 1 - } - if err := fs.RecursiveCopy(ctx.PackageInfos[internalDep].PackageJSONPath, jsonDir, fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy %v into %v: %w", internalDep, jsonDir, err)) - return 1 - } - } else { - targetDir := filepath.Join(pruneOptions.cwd, "out", ctx.PackageInfos[internalDep].Dir) - if err := fs.EnsureDir(targetDir); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to create folder %v for %v: %w", targetDir, internalDep, err)) - return 1 - } - if err := fs.RecursiveCopy(ctx.PackageInfos[internalDep].Dir, targetDir, fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy %v into %v: %w", internalDep, targetDir, err)) - return 1 - } - } - - for k, v := range ctx.PackageInfos[internalDep].SubLockfile { - lockfile[k] = v - } - - logger.Printf(" - Added %v", ctx.PackageInfos[internalDep].Name) - } - c.Config.Logger.Trace("new workspaces", "value", workspaces) - if pruneOptions.docker { - if fs.FileExists(".gitignore") { - if err := fs.CopyFile(".gitignore", filepath.Join(pruneOptions.cwd, "out", "full", ".gitignore"), fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root .gitignore: %w", err)) - return 1 - } - } - // We only need to actually copy turbo.json into "full" folder since it isn't needed for installation in docker - if fs.FileExists("turbo.json") { - if err := fs.CopyFile("turbo.json", filepath.Join(pruneOptions.cwd, "out", "full", "turbo.json"), fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root turbo.json: %w", err)) - return 1 - } - } - - if err := fs.CopyFile("package.json", filepath.Join(pruneOptions.cwd, "out", "full", "package.json"), fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root package.json: %w", err)) - return 1 - } - - if err := fs.CopyFile("package.json", filepath.Join(pruneOptions.cwd, "out", "json", "package.json"), fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root package.json: %w", err)) - return 1 - } - } else { - if fs.FileExists(".gitignore") { - if err := fs.CopyFile(".gitignore", filepath.Join(pruneOptions.cwd, "out", ".gitignore"), fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root .gitignore: %w", err)) - return 1 - } - } - - if fs.FileExists("turbo.json") { - if err := fs.CopyFile("turbo.json", filepath.Join(pruneOptions.cwd, "out", "turbo.json"), 
fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root turbo.json: %w", err)) - return 1 - } - } - - if err := fs.CopyFile("package.json", filepath.Join(pruneOptions.cwd, "out", "package.json"), fs.DirPermissions); err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to copy root package.json: %w", err)) - return 1 - } - } - - var b bytes.Buffer - yamlEncoder := yaml.NewEncoder(&b) - yamlEncoder.SetIndent(2) // this is what you're looking for - yamlEncoder.Encode(lockfile) - - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to materialize sub-lockfile. This can happen if your lockfile contains merge conflicts or is somehow corrupted. Please report this if it occurs: %w", err)) - return 1 - } - err = ioutil.WriteFile(filepath.Join(pruneOptions.cwd, "out", "yarn.lock"), b.Bytes(), fs.DirPermissions) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to write sub-lockfile: %w", err)) - return 1 - } - - tmpGeneratedLockfile, err := os.Create(filepath.Join(pruneOptions.cwd, "out", "yarn-tmp.lock")) - tmpGeneratedLockfileWriter := bufio.NewWriter(tmpGeneratedLockfile) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed create temporary lockfile: %w", err)) - return 1 - } - - if ctx.Backend.Name == "nodejs-yarn" { - tmpGeneratedLockfileWriter.WriteString("# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.\n# yarn lockfile v1\n\n") - } else { - tmpGeneratedLockfileWriter.WriteString("# This file is generated by running \"yarn install\" inside your project.\n# Manual changes might be lost - proceed with caution!\n\n__metadata:\nversion: 5\ncacheKey: 8\n\n") - } - - // because of yarn being yarn, we need to inject lines in between each block of YAML to make it "valid" SYML - generatedLockfile, err := os.Open(filepath.Join(filepath.Join(pruneOptions.cwd, "out", "yarn.lock"))) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to massage lockfile: %w", err)) - return 1 - } - - scan := bufio.NewScanner(generatedLockfile) - buf := make([]byte, 0, 1024*1024) - scan.Buffer(buf, 10*1024*1024) - for scan.Scan() { - line := scan.Text() //Writing to Stdout - if !strings.HasPrefix(line, " ") { - tmpGeneratedLockfileWriter.WriteString(fmt.Sprintf("\n%v\n", strings.ReplaceAll(line, "'", "\""))) - } else { - tmpGeneratedLockfileWriter.WriteString(fmt.Sprintf("%v\n", strings.ReplaceAll(line, "'", "\""))) - } - } - // Make sure to flush the log write before we start saving it. - tmpGeneratedLockfileWriter.Flush() - - // Close the files before we rename them - tmpGeneratedLockfile.Close() - generatedLockfile.Close() - - // Rename the file - err = os.Rename(filepath.Join(pruneOptions.cwd, "out", "yarn-tmp.lock"), filepath.Join(pruneOptions.cwd, "out", "yarn.lock")) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed finalize lockfile: %w", err)) - return 1 - } - return 0 -} - -// logError logs an error and outputs it to the UI. 
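// Hedged aside: a standalone sketch of the lockfile post-processing performed
// above. yarn v1 lockfiles are only YAML-like, so after encoding the pruned
// sub-lockfile as YAML the code re-reads it line by line, inserts a blank line
// before every top-level block, and swaps single quotes for double quotes.
// The file names here are invented; the real code writes to a temporary file
// and renames it over out/yarn.lock, and uses a different header for berry.
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

func main() {
	in, err := os.Open("yarn.lock") // the YAML-encoded sub-lockfile
	if err != nil {
		panic(err)
	}
	defer in.Close()

	out, err := os.Create("yarn-massaged.lock")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	w := bufio.NewWriter(out)
	fmt.Fprint(w, "# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY.\n# yarn lockfile v1\n\n")

	scan := bufio.NewScanner(in)
	scan.Buffer(make([]byte, 0, 1024*1024), 10*1024*1024) // tolerate very large lockfiles
	for scan.Scan() {
		line := strings.ReplaceAll(scan.Text(), "'", "\"")
		if !strings.HasPrefix(line, " ") {
			fmt.Fprintf(w, "\n%v\n", line) // blank line before each top-level block
		} else {
			fmt.Fprintf(w, "%v\n", line)
		}
	}
	if err := scan.Err(); err != nil {
		panic(err)
	}
	w.Flush()
}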
-func (c *PruneCommand) logError(log hclog.Logger, prefix string, err error) { - log.Error(prefix, "error", err) - - if prefix != "" { - prefix += ": " - } - pref := color.New(color.Bold, color.FgRed, color.ReverseVideo).Sprint(" ERROR ") - c.Ui.Error(fmt.Sprintf("%s%s%s", pref, prefix, color.RedString(" %v", err))) -} diff --git a/cli/internal/run/color_cache.go b/cli/internal/run/color_cache.go index 46326c5ec5061..1d4a95a6b4977 100644 --- a/cli/internal/run/color_cache.go +++ b/cli/internal/run/color_cache.go @@ -3,9 +3,8 @@ package run import ( "sync" - "github.com/vercel/turborepo/cli/internal/util" - "github.com/fatih/color" + "github.com/vercel/turborepo/cli/internal/util" ) type colorFn = func(format string, a ...interface{}) string diff --git a/cli/internal/run/run.go b/cli/internal/run/run.go deleted file mode 100644 index 4c2f4e5fd7d66..0000000000000 --- a/cli/internal/run/run.go +++ /dev/null @@ -1,1176 +0,0 @@ -package run - -import ( - "bufio" - gocontext "context" - "encoding/json" - "flag" - "fmt" - "io" - "log" - "os" - "os/exec" - "path/filepath" - "sort" - "strings" - "sync" - "text/tabwriter" - "time" - - "github.com/vercel/turborepo/cli/internal/analytics" - "github.com/vercel/turborepo/cli/internal/api" - "github.com/vercel/turborepo/cli/internal/cache" - "github.com/vercel/turborepo/cli/internal/config" - "github.com/vercel/turborepo/cli/internal/context" - "github.com/vercel/turborepo/cli/internal/core" - "github.com/vercel/turborepo/cli/internal/fs" - "github.com/vercel/turborepo/cli/internal/globby" - "github.com/vercel/turborepo/cli/internal/logstreamer" - "github.com/vercel/turborepo/cli/internal/process" - "github.com/vercel/turborepo/cli/internal/scm" - "github.com/vercel/turborepo/cli/internal/scope" - "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - "github.com/vercel/turborepo/cli/internal/util/browser" - - "github.com/pyr-sh/dag" - - "github.com/fatih/color" - "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" - "github.com/pkg/errors" -) - -const TOPOLOGICAL_PIPELINE_DELIMITER = "^" -const ENV_PIPELINE_DELIMITER = "$" - -// RunCommand is a Command implementation that tells Turbo to run a task -type RunCommand struct { - Config *config.Config - Ui *cli.ColoredUi - Processes *process.Manager -} - -// completeGraph represents the common state inferred from the filesystem and pipeline. -// It is not intended to include information specific to a particular run. -type completeGraph struct { - TopologicalGraph dag.AcyclicGraph - Pipeline map[string]fs.Pipeline - SCC [][]dag.Vertex - PackageInfos map[interface{}]*fs.PackageJSON - GlobalHash string - RootNode string -} - -// runSpec contains the run-specific configuration elements that come from a particular -// invocation of turbo. -type runSpec struct { - Targets []string - FilteredPkgs util.Set - Opts *RunOptions -} - -type LogsMode string - -const ( - FullLogs LogsMode = "full" - HashLogs LogsMode = "hash" - NoLogs LogsMode = "none" -) - -func (rs *runSpec) ArgsForTask(task string) []string { - passThroughArgs := make([]string, 0, len(rs.Opts.passThroughArgs)) - for _, target := range rs.Targets { - if target == task { - passThroughArgs = append(passThroughArgs, rs.Opts.passThroughArgs...) 
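			// Everything after a literal "--" on the turbo command line is collected
			// into Opts.passThroughArgs (see parseRunArgs further down) and is only
			// forwarded to tasks that were explicitly requested; for any other task
			// this method returns an empty slice.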
- } - } - return passThroughArgs -} - -// Synopsis of run command -func (c *RunCommand) Synopsis() string { - return "Run a task" -} - -// Help returns information about the `run` command -func (c *RunCommand) Help() string { - helpText := strings.TrimSpace(` -Usage: turbo run [options] ... - - Run tasks across projects in your monorepo. - - By default, turbo executes tasks in topological order (i.e. - dependencies first) and then caches the results. Re-running commands for - tasks already in the cache will skip re-execution and immediately move - artifacts from the cache into the correct output folders (as if the task - occurred again). - -Options: - --help Show this message. - --scope Specify package(s) to act as entry points for task - execution. Supports globs. - --cache-dir Specify local filesystem cache directory. - (default "./node_modules/.cache/turbo") - --concurrency Limit the concurrency of task execution. Use 1 for - serial (i.e. one-at-a-time) execution. (default 10) - --continue Continue execution even if a task exits with an error - or non-zero exit code. The default behavior is to bail - immediately. (default false) - --force Ignore the existing cache (to force execution). - (default false) - --graph Generate a Dot graph of the task execution. - --global-deps Specify glob of global filesystem dependencies to - be hashed. Useful for .env and files in the root - directory. Can be specified multiple times. - --since Limit/Set scope to changed packages since a - mergebase. This uses the git diff ${target_branch}... - mechanism to identify which packages have changed. - --team The slug of the turborepo.com team. - --token A turborepo.com personal access token. - --ignore Files to ignore when calculating changed files - (i.e. --since). Supports globs. - --profile File to write turbo's performance profile output into. - You can load the file up in chrome://tracing to see - which parts of your build were slow. - --parallel Execute all tasks in parallel. (default false) - --include-dependencies Include the dependencies of tasks in execution. - (default false) - --no-deps Exclude dependent task consumers from execution. - (default false) - --no-cache Avoid saving task results to the cache. Useful for - development/watch tasks. (default false) - --output-logs Set type of process output logging. Use full to show - all output. Use hash-only to show only turbo-computed - task hashes. Use new-only to show only new output with - only hashes for cached tasks. Use none to hide process - output. (default full) - --dry/--dry-run[=json] List the packages in scope and the tasks that would be run, - but don't actually run them. Passing --dry=json or - --dry-run=json will render the output in JSON format. 
-`) - return strings.TrimSpace(helpText) -} - -// Run executes tasks in the monorepo -func (c *RunCommand) Run(args []string) int { - startAt := time.Now() - log.SetFlags(0) - flags := flag.NewFlagSet("run", flag.ContinueOnError) - flags.Usage = func() { c.Config.Logger.Info(c.Help()) } - if err := flags.Parse(args); err != nil { - return 1 - } - - runOptions, err := parseRunArgs(args, c.Ui) - if err != nil { - c.logError(c.Config.Logger, "", err) - return 1 - } - - c.Config.Cache.Dir = runOptions.cacheFolder - - ctx, err := context.New(context.WithGraph(runOptions.cwd, c.Config)) - if err != nil { - c.logError(c.Config.Logger, "", err) - return 1 - } - targets, err := getTargetsFromArguments(args, ctx.TurboConfig) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to resolve targets: %w", err)) - return 1 - } - - scmInstance, err := scm.FromInRepo(runOptions.cwd) - if err != nil { - if errors.Is(err, scm.ErrFallback) { - c.logWarning(c.Config.Logger, "", err) - } else { - c.logError(c.Config.Logger, "", fmt.Errorf("failed to create SCM: %w", err)) - return 1 - } - } - filteredPkgs, err := scope.ResolvePackages(runOptions.ScopeOpts(), scmInstance, ctx, c.Ui, c.Config.Logger) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("failed resolve packages to run %v", err)) - } - c.Config.Logger.Debug("global hash", "value", ctx.GlobalHash) - c.Config.Logger.Debug("local cache folder", "path", runOptions.cacheFolder) - fs.EnsureDir(runOptions.cacheFolder) - - // TODO: consolidate some of these arguments - g := &completeGraph{ - TopologicalGraph: ctx.TopologicalGraph, - Pipeline: ctx.TurboConfig.Pipeline, - SCC: ctx.SCC, - PackageInfos: ctx.PackageInfos, - GlobalHash: ctx.GlobalHash, - RootNode: ctx.RootNode, - } - rs := &runSpec{ - Targets: targets, - FilteredPkgs: filteredPkgs, - Opts: runOptions, - } - backend := ctx.Backend - return c.runOperation(g, rs, backend, startAt) -} - -func (c *RunCommand) runOperation(g *completeGraph, rs *runSpec, backend *api.LanguageBackend, startAt time.Time) int { - var topoVisit []interface{} - for _, node := range g.SCC { - v := node[0] - if v == g.RootNode { - continue - } - topoVisit = append(topoVisit, v) - pack := g.PackageInfos[v] - - ancestralHashes := make([]string, 0, len(pack.InternalDeps)) - if len(pack.InternalDeps) > 0 { - for _, ancestor := range pack.InternalDeps { - if h, ok := g.PackageInfos[ancestor]; ok { - ancestralHashes = append(ancestralHashes, h.Hash) - } - } - sort.Strings(ancestralHashes) - } - var hashable = struct { - hashOfFiles string - ancestralHashes []string - externalDepsHash string - globalHash string - }{hashOfFiles: pack.FilesHash, ancestralHashes: ancestralHashes, externalDepsHash: pack.ExternalDepsHash, globalHash: g.GlobalHash} - - var err error - pack.Hash, err = fs.HashObject(hashable) - if err != nil { - c.logError(c.Config.Logger, "", fmt.Errorf("[ERROR] %v: error computing combined hash: %v", pack.Name, err)) - return 1 - } - c.Config.Logger.Debug(fmt.Sprintf("%v: package ancestralHash", pack.Name), "hash", ancestralHashes) - c.Config.Logger.Debug(fmt.Sprintf("%v: package hash", pack.Name), "hash", pack.Hash) - } - - c.Config.Logger.Debug("topological sort order", "value", topoVisit) - - vertexSet := make(util.Set) - for _, v := range g.TopologicalGraph.Vertices() { - vertexSet.Add(v) - } - // We remove nodes that aren't in the final filter set - for _, toRemove := range vertexSet.Difference(rs.FilteredPkgs) { - if toRemove != g.RootNode { - g.TopologicalGraph.Remove(toRemove) - } - } 
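	// At this point every package outside the filtered scope has been removed
	// from the topological graph, so only the selected workspaces remain as
	// vertices. Directly below, --parallel additionally strips every edge that
	// does not touch the synthetic root node, which lets all selected tasks
	// start immediately instead of waiting on their dependencies.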
- - // If we are running in parallel, then we remove all the edges in the graph - // except for the root - if rs.Opts.parallel { - for _, edge := range g.TopologicalGraph.Edges() { - if edge.Target() != g.RootNode { - g.TopologicalGraph.RemoveEdge(edge) - } - } - } - - engine, err := buildTaskGraph(&g.TopologicalGraph, g.Pipeline, rs) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error preparing engine: %s", err)) - return 1 - } - exitCode := 0 - if rs.Opts.dotGraph != "" { - err := c.generateDotGraph(engine.TaskGraph, filepath.Join(rs.Opts.cwd, rs.Opts.dotGraph)) - if err != nil { - c.logError(c.Config.Logger, "", err) - return 1 - } - } else if rs.Opts.dryRun { - tasksRun, err := c.executeDryRun(engine, g, rs, c.Config.Logger) - if err != nil { - c.logError(c.Config.Logger, "", err) - return 1 - } - packagesInScope := rs.FilteredPkgs.UnsafeListOfStrings() - sort.Strings(packagesInScope) - if rs.Opts.dryRunJson { - dryRun := &struct { - Packages []string `json:"packages"` - Tasks []hashedTask `json:"tasks"` - }{ - Packages: packagesInScope, - Tasks: tasksRun, - } - bytes, err := json.MarshalIndent(dryRun, "", " ") - if err != nil { - c.logError(c.Config.Logger, "", errors.Wrap(err, "failed to render to JSON")) - return 1 - } - c.Ui.Output(string(bytes)) - } else { - c.Ui.Output("") - c.Ui.Info(util.Sprintf("${CYAN}${BOLD}Packages in Scope${RESET}")) - p := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) - fmt.Fprintln(p, "Name\tPath\t") - for _, pkg := range packagesInScope { - fmt.Fprintln(p, fmt.Sprintf("%s\t%s\t", pkg, g.PackageInfos[pkg].Dir)) - } - p.Flush() - - c.Ui.Output("") - c.Ui.Info(util.Sprintf("${CYAN}${BOLD}Tasks to Run${RESET}")) - - for _, task := range tasksRun { - c.Ui.Info(util.Sprintf("${BOLD}%s${RESET}", task.TaskID)) - w := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Task\t=\t%s\t${RESET}", task.Task)) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Package\t=\t%s\t${RESET}", task.Package)) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Hash\t=\t%s\t${RESET}", task.Hash)) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Directory\t=\t%s\t${RESET}", task.Dir)) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Command\t=\t%s\t${RESET}", task.Command)) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Outputs\t=\t%s\t${RESET}", strings.Join(task.Outputs, ", "))) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Log File\t=\t%s\t${RESET}", task.LogFile)) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Dependencies\t=\t%s\t${RESET}", strings.Join(task.Dependencies, ", "))) - fmt.Fprintln(w, util.Sprintf(" ${GREY}Dependendents\t=\t%s\t${RESET}", strings.Join(task.Dependents, ", "))) - w.Flush() - } - - } - } else { - packagesInScope := rs.FilteredPkgs.UnsafeListOfStrings() - sort.Strings(packagesInScope) - c.Ui.Output(fmt.Sprintf(ui.Dim("• Packages in scope: %v"), strings.Join(packagesInScope, ", "))) - if rs.Opts.stream { - c.Ui.Output(fmt.Sprintf("%s %s %s", ui.Dim("• Running"), ui.Dim(ui.Bold(strings.Join(rs.Targets, ", "))), ui.Dim(fmt.Sprintf("in %v packages", rs.FilteredPkgs.Len())))) - } - exitCode = c.executeTasks(g, rs, engine, backend, startAt) - } - - return exitCode -} - -func buildTaskGraph(topoGraph *dag.AcyclicGraph, pipeline map[string]fs.Pipeline, rs *runSpec) (*core.Scheduler, error) { - engine := core.NewScheduler(topoGraph) - for taskName, value := range pipeline { - topoDeps := make(util.Set) - deps := make(util.Set) - if util.IsPackageTask(taskName) { - for _, from := range value.DependsOn { - if strings.HasPrefix(from, ENV_PIPELINE_DELIMITER) { - continue - } - if 
util.IsPackageTask(from) { - engine.AddDep(from, taskName) - continue - } else if strings.Contains(from, TOPOLOGICAL_PIPELINE_DELIMITER) { - topoDeps.Add(from[1:]) - } else { - deps.Add(from) - } - } - _, id := util.GetPackageTaskFromId(taskName) - taskName = id - } else { - for _, from := range value.DependsOn { - if strings.HasPrefix(from, ENV_PIPELINE_DELIMITER) { - continue - } - if strings.Contains(from, TOPOLOGICAL_PIPELINE_DELIMITER) { - topoDeps.Add(from[1:]) - } else { - deps.Add(from) - } - } - } - - engine.AddTask(&core.Task{ - Name: taskName, - TopoDeps: topoDeps, - Deps: deps, - }) - } - - if err := engine.Prepare(&core.SchedulerExecutionOptions{ - Packages: rs.FilteredPkgs.UnsafeListOfStrings(), - TaskNames: rs.Targets, - TasksOnly: rs.Opts.only, - }); err != nil { - return nil, err - } - return engine, nil -} - -// RunOptions holds the current run operations configuration - -type RunOptions struct { - // Whether to include dependent impacted consumers in execution (defaults to true) - includeDependents bool - // Whether to include includeDependencies (pkg.dependencies) in execution (defaults to false) - includeDependencies bool - // List of globs of file paths to ignore from execution scope calculation - ignore []string - // Whether to stream log outputs - stream bool - // Show a dot graph - dotGraph string - // List of globs to global files whose contents will be included in the global hash calculation - globalDeps []string - // Filtered list of package entrypoints - scope []string - // Force execution to be serially one-at-a-time - concurrency int - // Whether to execute in parallel (defaults to false) - parallel bool - // Git diff used to calculate changed packages - since string - // Current working directory - cwd string - // Whether to emit a perf profile - profile string - // Force task execution - forceExecution bool - // Cache results - cache bool - // Cache folder - cacheFolder string - // Immediately exit on task failure - bail bool - passThroughArgs []string - // Restrict execution to only the listed task names. 
Default false - only bool - // Task logs output modes (cached and not cached tasks): - // full - show all, - // hash - only show task hash, - // none - show nothing - cacheHitLogsMode LogsMode - cacheMissLogsMode LogsMode - dryRun bool - dryRunJson bool -} - -func (ro *RunOptions) ScopeOpts() *scope.Opts { - return &scope.Opts{ - IncludeDependencies: ro.includeDependencies, - IncludeDependents: ro.includeDependents, - Patterns: ro.scope, - Since: ro.since, - Cwd: ro.cwd, - IgnorePatterns: ro.ignore, - GlobalDepPatterns: ro.globalDeps, - } -} - -func getDefaultRunOptions() *RunOptions { - return &RunOptions{ - bail: true, - includeDependents: true, - parallel: false, - concurrency: 10, - dotGraph: "", - includeDependencies: false, - cache: true, - profile: "", // empty string does no tracing - forceExecution: false, - stream: true, - only: false, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - } -} - -func parseRunArgs(args []string, output cli.Ui) (*RunOptions, error) { - var runOptions = getDefaultRunOptions() - - if len(args) == 0 { - return nil, errors.Errorf("At least one task must be specified.") - } - - cwd, err := os.Getwd() - if err != nil { - return nil, fmt.Errorf("invalid working directory: %w", err) - } - runOptions.cwd = cwd - - unresolvedCacheFolder := filepath.FromSlash("./node_modules/.cache/turbo") - - // --scope and --since implies --include-dependencies for backwards compatibility - // When we switch to cobra we will need to track if it's been set manually. Currently - // it's only possible to set to true, but in the future a user could theoretically set - // it to false and override the default behavior. - includDepsSet := false - for argIndex, arg := range args { - if arg == "--" { - runOptions.passThroughArgs = args[argIndex+1:] - break - } else if strings.HasPrefix(arg, "--") { - switch { - case strings.HasPrefix(arg, "--since="): - if len(arg[len("--since="):]) > 0 { - runOptions.since = arg[len("--since="):] - } - case strings.HasPrefix(arg, "--scope="): - if len(arg[len("--scope="):]) > 0 { - runOptions.scope = append(runOptions.scope, arg[len("--scope="):]) - } - case strings.HasPrefix(arg, "--ignore="): - if len(arg[len("--ignore="):]) > 0 { - runOptions.ignore = append(runOptions.ignore, arg[len("--ignore="):]) - } - case strings.HasPrefix(arg, "--global-deps="): - if len(arg[len("--global-deps="):]) > 0 { - runOptions.globalDeps = append(runOptions.globalDeps, arg[len("--global-deps="):]) - } - case strings.HasPrefix(arg, "--cwd="): - if len(arg[len("--cwd="):]) > 0 { - runOptions.cwd = arg[len("--cwd="):] - } - case strings.HasPrefix(arg, "--parallel"): - runOptions.parallel = true - case strings.HasPrefix(arg, "--profile="): // this one must com before the next - if len(arg[len("--profile="):]) > 0 { - runOptions.profile = arg[len("--profile="):] - } - case strings.HasPrefix(arg, "--profile"): - runOptions.profile = fmt.Sprintf("%v-profile.json", time.Now().UnixNano()) - - case strings.HasPrefix(arg, "--no-deps"): - runOptions.includeDependents = false - case strings.HasPrefix(arg, "--no-cache"): - runOptions.cache = false - case strings.HasPrefix(arg, "--cacheFolder"): - output.Warn("[WARNING] The --cacheFolder flag has been deprecated and will be removed in future versions of turbo. 
Please use `--cache-dir` instead") - unresolvedCacheFolder = arg[len("--cacheFolder="):] - case strings.HasPrefix(arg, "--cache-dir"): - unresolvedCacheFolder = arg[len("--cache-dir="):] - case strings.HasPrefix(arg, "--continue"): - runOptions.bail = false - case strings.HasPrefix(arg, "--force"): - runOptions.forceExecution = true - case strings.HasPrefix(arg, "--stream"): - runOptions.stream = true - - case strings.HasPrefix(arg, "--graph="): // this one must com before the next - if len(arg[len("--graph="):]) > 0 { - runOptions.dotGraph = arg[len("--graph="):] - } - case strings.HasPrefix(arg, "--graph"): - runOptions.dotGraph = fmt.Sprintf("graph-%v.jpg", time.Now().UnixNano()) - case strings.HasPrefix(arg, "--serial"): - output.Warn("[WARNING] The --serial flag has been deprecated and will be removed in future versions of turbo. Please use `--concurrency=1` instead") - runOptions.concurrency = 1 - case strings.HasPrefix(arg, "--concurrency"): - concurrencyRaw := arg[len("--concurrency="):] - if concurrency, err := util.ParseConcurrency(concurrencyRaw); err != nil { - return nil, err - } else { - runOptions.concurrency = concurrency - } - case strings.HasPrefix(arg, "--includeDependencies"): - output.Warn("[WARNING] The --includeDependencies flag has renamed to --include-dependencies for consistency. Please use `--include-dependencies` instead") - runOptions.includeDependencies = true - includDepsSet = true - case strings.HasPrefix(arg, "--include-dependencies"): - runOptions.includeDependencies = true - includDepsSet = true - case strings.HasPrefix(arg, "--only"): - runOptions.only = true - case strings.HasPrefix(arg, "--output-logs="): - outputLogsMode := arg[len("--output-logs="):] - switch outputLogsMode { - case "full": - runOptions.cacheMissLogsMode = FullLogs - runOptions.cacheHitLogsMode = FullLogs - case "none": - runOptions.cacheMissLogsMode = NoLogs - runOptions.cacheHitLogsMode = NoLogs - case "hash-only": - runOptions.cacheMissLogsMode = HashLogs - runOptions.cacheHitLogsMode = HashLogs - case "new-only": - runOptions.cacheMissLogsMode = FullLogs - runOptions.cacheHitLogsMode = HashLogs - default: - output.Warn(fmt.Sprintf("[WARNING] unknown value %v for --output-logs CLI flag. Falling back to full", outputLogsMode)) - } - case strings.HasPrefix(arg, "--dry-run"): - runOptions.dryRun = true - if strings.HasPrefix(arg, "--dry-run=json") { - runOptions.dryRunJson = true - } - case strings.HasPrefix(arg, "--dry"): - runOptions.dryRun = true - if strings.HasPrefix(arg, "--dry=json") { - runOptions.dryRunJson = true - } - case strings.HasPrefix(arg, "--team"): - case strings.HasPrefix(arg, "--token"): - case strings.HasPrefix(arg, "--api"): - case strings.HasPrefix(arg, "--url"): - case strings.HasPrefix(arg, "--trace"): - case strings.HasPrefix(arg, "--cpuprofile"): - case strings.HasPrefix(arg, "--heap"): - case strings.HasPrefix(arg, "--no-gc"): - default: - return nil, errors.New(fmt.Sprintf("unknown flag: %v", arg)) - } - } - } - if len(runOptions.scope) != 0 && runOptions.since != "" && !includDepsSet { - runOptions.includeDependencies = true - } - - // Force streaming output in CI/CD non-interactive mode - if !ui.IsTTY || ui.IsCI { - runOptions.stream = true - } - - // We can only set this cache folder after we know actual cwd - runOptions.cacheFolder = filepath.Join(runOptions.cwd, unresolvedCacheFolder) - - return runOptions, nil -} - -// logError logs an error and outputs it to the UI. 
-func (c *RunCommand) logError(log hclog.Logger, prefix string, err error) { - log.Error(prefix, "error", err) - - if prefix != "" { - prefix += ": " - } - - c.Ui.Error(fmt.Sprintf("%s%s%s", ui.ERROR_PREFIX, prefix, color.RedString(" %v", err))) -} - -// logError logs an error and outputs it to the UI. -func (c *RunCommand) logWarning(log hclog.Logger, prefix string, err error) { - log.Warn(prefix, "warning", err) - - if prefix != "" { - prefix = " " + prefix + ": " - } - - c.Ui.Error(fmt.Sprintf("%s%s%s", ui.WARNING_PREFIX, prefix, color.YellowString(" %v", err))) -} - -func hasGraphViz() bool { - err := exec.Command("dot", "-v").Run() - return err == nil -} - -func (c *RunCommand) executeTasks(g *completeGraph, rs *runSpec, engine *core.Scheduler, backend *api.LanguageBackend, startAt time.Time) int { - goctx := gocontext.Background() - var analyticsSink analytics.Sink - if c.Config.IsLoggedIn() { - analyticsSink = c.Config.ApiClient - } else { - analyticsSink = analytics.NullSink - } - analyticsClient := analytics.NewClient(goctx, analyticsSink, c.Config.Logger.Named("analytics")) - defer analyticsClient.CloseWithTimeout(50 * time.Millisecond) - turboCache := cache.New(c.Config, analyticsClient) - defer turboCache.Shutdown() - runState := NewRunState(rs.Opts, startAt) - runState.Listen(c.Ui, time.Now()) - ec := &execContext{ - colorCache: NewColorCache(), - runState: runState, - rs: rs, - ui: &cli.ConcurrentUi{Ui: c.Ui}, - turboCache: turboCache, - logger: c.Config.Logger, - backend: backend, - processes: c.Processes, - } - - // run the thing - errs := engine.Execute(g.getPackageTaskVisitor(ec.exec), core.ExecOpts{ - Parallel: rs.Opts.parallel, - Concurrency: rs.Opts.concurrency, - }) - - // Track if we saw any child with a non-zero exit code - exitCode := 0 - exitCodeErr := &process.ChildExit{} - for _, err := range errs { - if errors.As(err, &exitCodeErr) { - if exitCodeErr.ExitCode > exitCode { - exitCode = exitCodeErr.ExitCode - } - } - c.Ui.Error(err.Error()) - } - - ec.logReplayWaitGroup.Wait() - - if err := runState.Close(c.Ui, rs.Opts.profile); err != nil { - c.Ui.Error(fmt.Sprintf("Error with profiler: %s", err.Error())) - return 1 - } - return exitCode -} - -type hashedTask struct { - TaskID string `json:"taskId"` - Task string `json:"task"` - Package string `json:"package"` - Hash string `json:"hash"` - Command string `json:"command"` - Outputs []string `json:"outputs"` - LogFile string `json:"logFile"` - Dir string `json:"directory"` - Dependencies []string `json:"dependencies"` - Dependents []string `json:"dependents"` -} - -func (c *RunCommand) executeDryRun(engine *core.Scheduler, g *completeGraph, rs *runSpec, logger hclog.Logger) ([]hashedTask, error) { - taskIDs := []hashedTask{} - errs := engine.Execute(g.getPackageTaskVisitor(func(pt *packageTask) error { - command, ok := pt.pkg.Scripts[pt.task] - if !ok { - logger.Debug("no task in package, skipping") - logger.Debug("done", "status", "skipped") - return nil - } - passThroughArgs := rs.ArgsForTask(pt.task) - hash, err := pt.hash(passThroughArgs, logger) - if err != nil { - return err - } - ancestors, err := engine.TaskGraph.Ancestors(pt.taskID) - if err != nil { - return err - } - stringAncestors := []string{} - for _, dep := range ancestors { - // Don't leak out internal ROOT_NODE_NAME nodes, which are just placeholders - if !strings.Contains(dep.(string), core.ROOT_NODE_NAME) { - stringAncestors = append(stringAncestors, dep.(string)) - } - } - descendents, err := engine.TaskGraph.Descendents(pt.taskID) - if err != 
nil { - return err - } - stringDescendents := []string{} - for _, dep := range descendents { - // Don't leak out internal ROOT_NODE_NAME nodes, which are just placeholders - if !strings.Contains(dep.(string), core.ROOT_NODE_NAME) { - stringDescendents = append(stringDescendents, dep.(string)) - } - } - sort.Strings(stringDescendents) - - taskIDs = append(taskIDs, hashedTask{ - TaskID: pt.taskID, - Task: pt.task, - Package: pt.packageName, - Hash: hash, - Command: command, - Dir: pt.pkg.Dir, - Outputs: pt.ExternalOutputs(), - LogFile: pt.RepoRelativeLogFile(), - Dependencies: stringAncestors, - Dependents: stringDescendents, - }) - return nil - }), core.ExecOpts{ - Concurrency: 1, - Parallel: false, - }) - if len(errs) > 0 { - for _, err := range errs { - c.Ui.Error(err.Error()) - } - return nil, errors.New("errors occurred during dry-run graph traversal") - } - return taskIDs, nil -} - -// Replay logs will try to replay logs back to the stdout -func replayLogs(logger hclog.Logger, prefixUi cli.Ui, runOptions *RunOptions, logFileName, hash string, wg *sync.WaitGroup, silent bool, outputLogsMode LogsMode) { - defer wg.Done() - logger.Debug("start replaying logs") - f, err := os.Open(filepath.Join(runOptions.cwd, logFileName)) - if err != nil && !silent { - prefixUi.Warn(fmt.Sprintf("error reading logs: %v", err)) - logger.Error(fmt.Sprintf("error reading logs: %v", err.Error())) - } - defer f.Close() - if outputLogsMode != NoLogs { - scan := bufio.NewScanner(f) - if outputLogsMode == HashLogs { - //Writing to Stdout only the "cache hit, replaying output" line - scan.Scan() - prefixUi.Output(ui.StripAnsi(string(scan.Bytes()))) - } else { - for scan.Scan() { - prefixUi.Output(ui.StripAnsi(string(scan.Bytes()))) //Writing to Stdout - } - } - } - logger.Debug("finish replaying logs") -} - -// GetTargetsFromArguments returns a list of targets from the arguments and Turbo config. -// Return targets are always unique sorted alphabetically. -func getTargetsFromArguments(arguments []string, configJson *fs.TurboConfigJSON) ([]string, error) { - targets := make(util.Set) - for _, arg := range arguments { - if arg == "--" { - break - } - if !strings.HasPrefix(arg, "-") { - targets.Add(arg) - found := false - for task := range configJson.Pipeline { - if task == arg { - found = true - } - } - if !found { - return nil, fmt.Errorf("task `%v` not found in turbo pipeline in package.json. 
Are you sure you added it?", arg) - } - } - } - stringTargets := targets.UnsafeListOfStrings() - sort.Strings(stringTargets) - return stringTargets, nil -} - -type execContext struct { - colorCache *ColorCache - runState *RunState - rs *runSpec - logReplayWaitGroup sync.WaitGroup - ui cli.Ui - turboCache cache.Cache - logger hclog.Logger - backend *api.LanguageBackend - processes *process.Manager -} - -func (e *execContext) logError(log hclog.Logger, prefix string, err error) { - e.logger.Error(prefix, "error", err) - - if prefix != "" { - prefix += ": " - } - - e.ui.Error(fmt.Sprintf("%s%s%s", ui.ERROR_PREFIX, prefix, color.RedString(" %v", err))) -} - -func (e *execContext) exec(pt *packageTask) error { - cmdTime := time.Now() - - targetLogger := e.logger.Named(fmt.Sprintf("%v:%v", pt.pkg.Name, pt.task)) - targetLogger.Debug("start") - - // bail if the script doesn't exist - if _, ok := pt.pkg.Scripts[pt.task]; !ok { - targetLogger.Debug("no task in package, skipping") - targetLogger.Debug("done", "status", "skipped", "duration", time.Since(cmdTime)) - return nil - } - - // Setup tracer - tracer := e.runState.Run(util.GetTaskId(pt.pkg.Name, pt.task)) - - // Create a logger - pref := e.colorCache.PrefixColor(pt.pkg.Name) - actualPrefix := pref("%s:%s: ", pt.pkg.Name, pt.task) - targetUi := &cli.PrefixedUi{ - Ui: e.ui, - OutputPrefix: actualPrefix, - InfoPrefix: actualPrefix, - ErrorPrefix: actualPrefix, - WarnPrefix: actualPrefix, - } - - logFileName := filepath.Join(pt.pkg.Dir, ".turbo", fmt.Sprintf("turbo-%v.log", pt.task)) - targetLogger.Debug("log file", "path", filepath.Join(e.rs.Opts.cwd, logFileName)) - - passThroughArgs := e.rs.ArgsForTask(pt.task) - hash, err := pt.hash(passThroughArgs, e.logger) - e.logger.Debug("task hash", "value", hash) - if err != nil { - e.ui.Error(fmt.Sprintf("Hashing error: %v", err)) - // @TODO probably should abort fatally??? - } - // Cache --------------------------------------------- - var hit bool - if !e.rs.Opts.forceExecution { - hit, _, _, err = e.turboCache.Fetch(e.rs.Opts.cwd, hash, nil) - if err != nil { - targetUi.Error(fmt.Sprintf("error fetching from cache: %s", err)) - } else if hit { - if e.rs.Opts.stream && fs.FileExists(filepath.Join(e.rs.Opts.cwd, logFileName)) { - e.logReplayWaitGroup.Add(1) - go replayLogs(targetLogger, e.ui, e.rs.Opts, logFileName, hash, &e.logReplayWaitGroup, false, e.rs.Opts.cacheHitLogsMode) - } - targetLogger.Debug("done", "status", "complete", "duration", time.Since(cmdTime)) - tracer(TargetCached, nil) - - return nil - } - if e.rs.Opts.stream && e.rs.Opts.cacheHitLogsMode != NoLogs { - targetUi.Output(fmt.Sprintf("cache miss, executing %s", ui.Dim(hash))) - } - } else { - if e.rs.Opts.stream && e.rs.Opts.cacheHitLogsMode != NoLogs { - targetUi.Output(fmt.Sprintf("cache bypass, force executing %s", ui.Dim(hash))) - } - } - - // Setup command execution - argsactual := append([]string{"run"}, pt.task) - argsactual = append(argsactual, passThroughArgs...) - // @TODO: @jaredpalmer fix this hack to get the package manager's name - var cmd *exec.Cmd - if e.backend.Name == "nodejs-berry" { - cmd = exec.Command("yarn", argsactual...) - } else { - cmd = exec.Command(strings.TrimPrefix(e.backend.Name, "nodejs-"), argsactual...) 
- } - cmd.Dir = pt.pkg.Dir - envs := fmt.Sprintf("TURBO_HASH=%v", hash) - cmd.Env = append(os.Environ(), envs) - - // Setup stdout/stderr - // If we are not caching anything, then we don't need to write logs to disk - // be careful about this conditional given the default of cache = true - var writer io.Writer - if !e.rs.Opts.cache || (pt.pipeline.Cache != nil && !*pt.pipeline.Cache) { - writer = os.Stdout - } else { - // Setup log file - if err := fs.EnsureDir(logFileName); err != nil { - tracer(TargetBuildFailed, err) - e.logError(targetLogger, actualPrefix, err) - if e.rs.Opts.bail { - os.Exit(1) - } - } - output, err := os.Create(logFileName) - if err != nil { - tracer(TargetBuildFailed, err) - e.logError(targetLogger, actualPrefix, err) - if e.rs.Opts.bail { - os.Exit(1) - } - } - defer output.Close() - bufWriter := bufio.NewWriter(output) - bufWriter.WriteString(fmt.Sprintf("%scache hit, replaying output %s\n", actualPrefix, ui.Dim(hash))) - defer bufWriter.Flush() - if e.rs.Opts.cacheMissLogsMode == NoLogs || e.rs.Opts.cacheMissLogsMode == HashLogs { - // only write to log file, not to stdout - writer = bufWriter - } else { - writer = io.MultiWriter(os.Stdout, bufWriter) - } - } - - logger := log.New(writer, "", 0) - // Setup a streamer that we'll pipe cmd.Stdout to - logStreamerOut := logstreamer.NewLogstreamer(logger, actualPrefix, false) - // Setup a streamer that we'll pipe cmd.Stderr to. - logStreamerErr := logstreamer.NewLogstreamer(logger, actualPrefix, false) - cmd.Stderr = logStreamerErr - cmd.Stdout = logStreamerOut - // Flush/Reset any error we recorded - logStreamerErr.FlushRecord() - logStreamerOut.FlushRecord() - - // Run the command - if err := e.processes.Exec(cmd); err != nil { - // if we already know we're in the process of exiting, - // we don't need to record an error to that effect. 
- if errors.Is(err, process.ErrClosing) { - return nil - } - tracer(TargetBuildFailed, err) - targetLogger.Error("Error: command finished with error: %w", err) - if e.rs.Opts.bail { - if e.rs.Opts.stream { - targetUi.Error(fmt.Sprintf("Error: command finished with error: %s", err)) - } else { - f, err := os.Open(filepath.Join(e.rs.Opts.cwd, logFileName)) - if err != nil { - targetUi.Warn(fmt.Sprintf("failed reading logs: %v", err)) - } - defer f.Close() - scan := bufio.NewScanner(f) - e.ui.Error("") - e.ui.Error(util.Sprintf("%s ${RED}%s finished with error${RESET}", ui.ERROR_PREFIX, util.GetTaskId(pt.pkg.Name, pt.task))) - e.ui.Error("") - for scan.Scan() { - e.ui.Output(util.Sprintf("${RED}%s:%s: ${RESET}%s", pt.pkg.Name, pt.task, scan.Bytes())) //Writing to Stdout - } - } - e.processes.Close() - } else { - if e.rs.Opts.stream { - targetUi.Warn("command finished with error, but continuing...") - } - } - return err - } - - // Cache command outputs - if e.rs.Opts.cache && (pt.pipeline.Cache == nil || *pt.pipeline.Cache) { - outputs := pt.HashableOutputs() - targetLogger.Debug("caching output", "outputs", outputs) - ignore := []string{} - filesToBeCached := globby.GlobFiles(pt.pkg.Dir, outputs, ignore) - if err := e.turboCache.Put(pt.pkg.Dir, hash, int(time.Since(cmdTime).Milliseconds()), filesToBeCached); err != nil { - e.logError(targetLogger, "", fmt.Errorf("error caching output: %w", err)) - } - } - - // Clean up tracing - tracer(TargetBuilt, nil) - targetLogger.Debug("done", "status", "complete", "duration", time.Since(cmdTime)) - return nil -} - -func (c *RunCommand) generateDotGraph(taskGraph *dag.AcyclicGraph, outputFilename string) error { - graphString := string(taskGraph.Dot(&dag.DotOpts{ - Verbose: true, - DrawCycles: true, - })) - ext := filepath.Ext(outputFilename) - if ext == ".html" { - f, err := os.Create(outputFilename) - if err != nil { - return fmt.Errorf("error writing graph: %w", err) - } - defer f.Close() - f.WriteString(` - - - - Graph - - - - - - - `) - c.Ui.Output("") - c.Ui.Output(fmt.Sprintf("✔ Generated task graph in %s", ui.Bold(outputFilename))) - if ui.IsTTY { - browser.OpenBrowser(outputFilename) - } - return nil - } - hasDot := hasGraphViz() - if hasDot { - dotArgs := []string{"-T" + ext[1:], "-o", outputFilename} - cmd := exec.Command("dot", dotArgs...) 
- cmd.Stdin = strings.NewReader(graphString) - if err := cmd.Run(); err != nil { - return fmt.Errorf("could not generate task graphfile %v: %w", outputFilename, err) - } else { - c.Ui.Output("") - c.Ui.Output(fmt.Sprintf("✔ Generated task graph in %s", ui.Bold(outputFilename))) - } - } else { - c.Ui.Output("") - c.Ui.Warn(color.New(color.FgYellow, color.Bold, color.ReverseVideo).Sprint(" WARNING ") + color.YellowString(" `turbo` uses Graphviz to generate an image of your\ngraph, but Graphviz isn't installed on this machine.\n\nYou can download Graphviz from https://graphviz.org/download.\n\nIn the meantime, you can use this string output with an\nonline Dot graph viewer.")) - c.Ui.Output("") - c.Ui.Output(graphString) - } - return nil -} - -type packageTask struct { - taskID string - task string - packageName string - pkg *fs.PackageJSON - pipeline *fs.Pipeline -} - -func (pt *packageTask) ExternalOutputs() []string { - if pt.pipeline.Outputs == nil { - return []string{"dist/**/*", "build/**/*"} - } - return pt.pipeline.Outputs -} - -func (pt *packageTask) RepoRelativeLogFile() string { - return filepath.Join(pt.pkg.Dir, ".turbo", fmt.Sprintf("turbo-%v.log", pt.task)) -} - -func (pt *packageTask) HashableOutputs() []string { - outputs := []string{fmt.Sprintf(".turbo/turbo-%v.log", pt.task)} - outputs = append(outputs, pt.ExternalOutputs()...) - return outputs -} - -func (pt *packageTask) hash(args []string, logger hclog.Logger) (string, error) { - // Hash --------------------------------------------- - outputs := pt.HashableOutputs() - logger.Debug("task output globs", "outputs", outputs) - - // Hash the task-specific environment variables found in the dependsOnKey in the pipeline - var hashableEnvVars []string - var hashableEnvPairs []string - if len(pt.pipeline.DependsOn) > 0 { - for _, v := range pt.pipeline.DependsOn { - if strings.Contains(v, ENV_PIPELINE_DELIMITER) { - trimmed := strings.TrimPrefix(v, ENV_PIPELINE_DELIMITER) - hashableEnvPairs = append(hashableEnvPairs, fmt.Sprintf("%v=%v", trimmed, os.Getenv(trimmed))) - hashableEnvVars = append(hashableEnvVars, trimmed) - } - } - sort.Strings(hashableEnvVars) // always sort them - } - logger.Debug("hashable env vars", "vars", hashableEnvVars) - hashable := struct { - Hash string - Task string - Outputs []string - PassThruArgs []string - HashableEnvPairs []string - }{ - Hash: pt.pkg.Hash, - Task: pt.task, - Outputs: outputs, - PassThruArgs: args, - HashableEnvPairs: hashableEnvPairs, - } - return fs.HashObject(hashable) -} - -func (g *completeGraph) getPackageTaskVisitor(visitor func(pt *packageTask) error) func(taskID string) error { - return func(taskID string) error { - name, task := util.GetPackageTaskFromId(taskID) - pkg := g.PackageInfos[name] - // first check for package-tasks - pipeline, ok := g.Pipeline[fmt.Sprintf("%v", taskID)] - if !ok { - // then check for regular tasks - altpipe, notcool := g.Pipeline[task] - // if neither, then bail - if !notcool && !ok { - return nil - } - // override if we need to... 
- pipeline = altpipe - } - return visitor(&packageTask{ - taskID: taskID, - task: task, - packageName: name, - pkg: pkg, - pipeline: &pipeline, - }) - } -} diff --git a/cli/internal/run/run_state.go b/cli/internal/run/run_state.go index 2bf95dc8d4875..9fdf6f829363d 100644 --- a/cli/internal/run/run_state.go +++ b/cli/internal/run/run_state.go @@ -9,16 +9,48 @@ import ( "sync" "time" + "github.com/google/chrometracing" "github.com/vercel/turborepo/cli/internal/fs" + tlogger "github.com/vercel/turborepo/cli/internal/logger" + "github.com/vercel/turborepo/cli/internal/scope" "github.com/vercel/turborepo/cli/internal/ui" - "github.com/vercel/turborepo/cli/internal/util" - - cursor "github.com/vercel/turborepo/cli/internal/ui/term" - - "github.com/google/chrometracing" - "github.com/mitchellh/cli" + "github.com/vercel/turborepo/cli/internal/ui/term" ) +// RunOptions holds the current run operations configuration +type RunOptions struct { + IncludeDependents bool + Scope []string + CacheDir string + Concurrency int + ShouldContinue bool + Force bool + DotGraph string + Graph bool + GlobalDeps []string + Since string + Ignore []string + Profile string + Parallel bool + IncludeDeps bool + NoDeps bool + NoCache bool + Cwd string + Stream bool + Only bool + Bail bool + PassThroughArgs []string + DryRun bool + DryRunType string + OutputLogs string + // Task logs output modes (cached and not cached tasks): + // full - show all, + // hash - only show task hash, + // none - show nothing + CacheHitLogsMode string + CacheMissLogsMode string +} + // A RunResult represents a single event in the build process, i.e. a target starting or finishing // building, or reaching some milestone within those steps. type RunResult struct { @@ -38,6 +70,18 @@ type RunResult struct { // Tests TestSuite } +func (ro *RunOptions) ScopeOpts() *scope.Opts { + return &scope.Opts{ + IncludeDependencies: ro.IncludeDeps, + IncludeDependents: ro.IncludeDependents, + Patterns: ro.Scope, + Since: ro.Since, + Cwd: ro.Cwd, + IgnorePatterns: ro.Ignore, + GlobalDepPatterns: ro.GlobalDeps, + } +} + // A RunResultStatus represents the status of a target when we log a build result. 
type RunResultStatus int @@ -175,7 +219,7 @@ func (r *RunState) add(result *RunResult, previous string, active bool) { case result.Status == TargetBuildFailed: r.Failure++ r.Attempted++ - if r.runOptions.bail && !r.runOptions.stream { + if r.runOptions.Bail && !r.runOptions.Stream { r.done <- result.Label } case result.Status == TargetCached: @@ -187,20 +231,20 @@ func (r *RunState) add(result *RunResult, previous string, active bool) { } } -func (r *RunState) Listen(Ui cli.Ui, startAt time.Time) { - if r.runOptions.stream { +func (r *RunState) Listen(logger *tlogger.Logger, startAt time.Time) { + if r.runOptions.Stream { return } r.ticker = time.NewTicker(100 * time.Millisecond) r.done = make(chan string) lineBuffer := 10 - go func(r *RunState, Ui cli.Ui) { + go func(r *RunState, logger *tlogger.Logger) { z := r i := 0 for { select { case outcome := <-z.done: - if !r.runOptions.stream { + if !r.runOptions.Stream { if outcome == "done" { if i != 0 { cursor.EraseLinesAbove(os.Stdout, lineBuffer+2) @@ -209,26 +253,25 @@ func (r *RunState) Listen(Ui cli.Ui, startAt time.Time) { if i != 0 { cursor.EraseLinesAbove(os.Stdout, lineBuffer+2) } - z.Render(Ui, startAt, i, lineBuffer) + z.Render(logger, startAt, i, lineBuffer) } } case <-z.ticker.C: - if !r.runOptions.stream { + if !r.runOptions.Stream { if i != 0 { cursor.EraseLinesAbove(os.Stdout, lineBuffer+2) } - z.Render(Ui, startAt, i, lineBuffer) + z.Render(logger, startAt, i, lineBuffer) i++ } default: continue } } - }(r, Ui) - + }(r, logger) } -func (r *RunState) Render(ui cli.Ui, startAt time.Time, renderCount int, lineBuffer int) { +func (r *RunState) Render(logger *tlogger.Logger, startAt time.Time, renderCount int, lineBuffer int) { r.mu.Lock() defer r.mu.Unlock() idx := 0 @@ -237,8 +280,8 @@ func (r *RunState) Render(ui cli.Ui, startAt time.Time, renderCount int, lineBuf idx = buf - lineBuffer } tStr := fmt.Sprintf("%.2fs", time.Since(startAt).Seconds()) - ui.Output(util.Sprintf("${BOLD}>>> TURBO${RESET}")) - ui.Output(util.Sprintf("${BOLD}>>> BUILDING%s(%s)${RESET}", strings.Repeat(".", 52-len(tStr)), tStr)) + logger.Printf("${BOLD}>>> TURBO${RESET}") + logger.Printf("${BOLD}>>> BUILDING%s(%s)${RESET}", strings.Repeat(".", 52-len(tStr)), tStr) // In order to simplify the output, we want to fill in n < 10 with IDLE // TODO: we might want to match this up with --concurrency flag @@ -255,24 +298,24 @@ func (r *RunState) Render(ui cli.Ui, startAt time.Time, renderCount int, lineBuf fill := 60 - len(item.Label) switch r.state[k].Status { case TargetBuilding: - ui.Output(util.Sprintf("${WHITE}%s %s%s(%s)${RESET}", " • ", k, strings.Repeat(".", fill-len(t)), t)) + logger.Printf("${WHITE}%s %s%s(%s)${RESET}", " • ", k, strings.Repeat(".", fill-len(t)), t) case TargetCached: d = item.Duration.Truncate(time.Millisecond * 100).String() - ui.Output(util.Sprintf("${GREY}%s %s%s(%s)${RESET}", " ✓ ", k, strings.Repeat(".", fill-len(d)), d)) + logger.Printf("${GREY}%s %s%s(%s)${RESET}", " ✓ ", k, strings.Repeat(".", fill-len(d)), d) case TargetBuilt: - ui.Output(util.Sprintf("${GREEN}%s %s%s(%s)${RESET}", " ✓ ", k, strings.Repeat(".", fill-len(d)), d)) + logger.Printf("${GREEN}%s %s%s(%s)${RESET}", " ✓ ", k, strings.Repeat(".", fill-len(d)), d) case TargetBuildFailed: - ui.Output(util.Sprintf("${RED}%s %s%s(%s)${RESET}", " ˣ ", k, strings.Repeat(".", fill-len(d)), d)) + logger.Printf("${RED}%s %s%s(%s)${RESET}", " ˣ ", k, strings.Repeat(".", fill-len(d)), d) default: - ui.Output(util.Sprintf("${GREY}%s %s%s(%s)${RESET}", " ✓ ", k, strings.Repeat(".", 
fill-len(d)), d)) + logger.Printf("${GREY}%s %s%s(%s)${RESET}", " ✓ ", k, strings.Repeat(".", fill-len(d)), d) } } else { - ui.Output(util.Sprintf("${GREY}%s %s%s${RESET}", " - ", k, strings.Repeat(".", 62-len(k)))) + logger.Printf("${GREY}%s %s%s${RESET}", " - ", k, strings.Repeat(".", 62-len(k))) } } } -func (r *RunState) Close(Ui cli.Ui, filename string) error { +func (r *RunState) Close(logger *tlogger.Logger, filename string) error { outputPath := chrometracing.Path() name := fmt.Sprintf("turbo-%s.trace", time.Now().Format(time.RFC3339)) if filename != "" { @@ -284,7 +327,7 @@ func (r *RunState) Close(Ui cli.Ui, filename string) error { } } - if !r.runOptions.stream { + if !r.runOptions.Stream { r.ticker.Stop() r.done <- "done" } @@ -292,21 +335,21 @@ func (r *RunState) Close(Ui cli.Ui, filename string) error { if r.Cached == r.Attempted && r.Attempted > 0 { maybeFullTurbo = ui.Rainbow(">>> FULL TURBO") } - if r.runOptions.stream { - Ui.Output("") // Clear the line - Ui.Output(util.Sprintf("${BOLD} Tasks:${BOLD_GREEN} %v successful${RESET}${GRAY}, %v total", r.Cached+r.Success, r.Attempted)) - Ui.Output(util.Sprintf("${BOLD}Cached: %v cached${RESET}${GRAY}, %v total", r.Cached, r.Attempted)) - Ui.Output(util.Sprintf("${BOLD} Time: %v${RESET} %v", time.Since(r.startedAt).Truncate(time.Millisecond), maybeFullTurbo)) - Ui.Output("") + if r.runOptions.Stream { + logger.Printf("") // Clear the line + logger.Printf("${BOLD} Tasks:${BOLD_GREEN} %v successful${RESET}${GRAY}, %v total", r.Cached+r.Success, r.Attempted) + logger.Printf("${BOLD}Cached: %v cached${RESET}${GRAY}, %v total", r.Cached, r.Attempted) + logger.Printf("${BOLD} Time: %v${RESET} %v", time.Since(r.startedAt).Truncate(time.Millisecond), maybeFullTurbo) + logger.Printf("") } else { incrementality := fmt.Sprintf("%.f%% incremental", math.Round(float64(r.Cached)/float64(r.Attempted)*100)) if r.Failure > 0 { - r.Render(Ui, r.startedAt, 3, len(r.Ordered)) - Ui.Output(util.Sprintf("${BOLD_RED}>>> BUILDING...FINISHED WITH ERRORS${RESET} ${GREY}(%s) %s${RESET} %s${RESET}", time.Since(r.startedAt).Truncate(time.Millisecond).String(), incrementality, maybeFullTurbo)) + r.Render(logger, r.startedAt, 3, len(r.Ordered)) + logger.Printf("${BOLD_RED}>>> BUILDING...FINISHED WITH ERRORS${RESET} ${GREY}(%s) %s${RESET} %s${RESET}", time.Since(r.startedAt).Truncate(time.Millisecond).String(), incrementality, maybeFullTurbo) } else { - Ui.Output(util.Sprintf("${BOLD}>>> TURBO${RESET}")) - Ui.Output(util.Sprintf("${BOLD}>>> BUILDING...FINISHED${RESET} ${GREY}(%s) %s${RESET} %s${RESET}", time.Since(r.startedAt).Truncate(time.Millisecond).String(), incrementality, maybeFullTurbo)) + logger.Printf("${BOLD}>>> TURBO${RESET}") + logger.Printf("${BOLD}>>> BUILDING...FINISHED${RESET} ${GREY}(%s) %s${RESET} %s${RESET}", time.Since(r.startedAt).Truncate(time.Millisecond).String(), incrementality, maybeFullTurbo) } } diff --git a/cli/internal/run/run_test.go b/cli/internal/run/run_test.go deleted file mode 100644 index ce89f7a4f50c1..0000000000000 --- a/cli/internal/run/run_test.go +++ /dev/null @@ -1,287 +0,0 @@ -package run - -import ( - "fmt" - "os" - "path/filepath" - "reflect" - "testing" - - "github.com/mitchellh/cli" - "github.com/vercel/turborepo/cli/internal/fs" - - "github.com/stretchr/testify/assert" -) - -func TestParseConfig(t *testing.T) { - defaultCwd, err := os.Getwd() - if err != nil { - t.Errorf("failed to get cwd: %v", err) - } - defaultCacheFolder := filepath.Join(defaultCwd, filepath.FromSlash("node_modules/.cache/turbo")) - cases := 
[]struct { - Name string - Args []string - Expected *RunOptions - }{ - { - "string flags", - []string{"foo"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "", - concurrency: 10, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "cwd", - []string{"foo", "--cwd=zop"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "", - concurrency: 10, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - cwd: "zop", - cacheFolder: filepath.FromSlash("zop/node_modules/.cache/turbo"), - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "scope", - []string{"foo", "--scope=foo", "--scope=blah"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "", - concurrency: 10, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - scope: []string{"foo", "blah"}, - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "concurrency", - []string{"foo", "--concurrency=12"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "", - concurrency: 12, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "graph", - []string{"foo", "--graph=g.png"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "g.png", - concurrency: 10, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "passThroughArgs", - []string{"foo", "--graph=g.png", "--", "--boop", "zoop"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "g.png", - concurrency: 10, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - passThroughArgs: []string{"--boop", "zoop"}, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "Empty passThroughArgs", - []string{"foo", "--graph=g.png", "--"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - dotGraph: "g.png", - concurrency: 10, - includeDependencies: false, - cache: true, - forceExecution: false, - profile: "", - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - passThroughArgs: []string{}, - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - { - "since and scope imply including dependencies for backwards compatibility", - []string{"foo", "--scope=bar", "--since=some-ref"}, - &RunOptions{ - includeDependents: true, - stream: true, - bail: true, - concurrency: 10, - includeDependencies: true, - cache: true, - cwd: defaultCwd, - cacheFolder: defaultCacheFolder, - scope: []string{"bar"}, - since: "some-ref", - cacheHitLogsMode: FullLogs, - cacheMissLogsMode: FullLogs, - }, - }, - } - - ui := &cli.BasicUi{ - Reader: os.Stdin, - Writer: os.Stdout, - ErrorWriter: os.Stderr, - } - - for i, tc := range cases { - t.Run(fmt.Sprintf("%d-%s", i, tc.Name), func(t *testing.T) { - - actual, err := parseRunArgs(tc.Args, ui) - if err 
!= nil { - t.Fatalf("invalid parse: %#v", err) - } - assert.EqualValues(t, tc.Expected, actual) - }) - } -} - -func TestGetTargetsFromArguments(t *testing.T) { - type args struct { - arguments []string - configJson *fs.TurboConfigJSON - } - tests := []struct { - name string - args args - want []string - wantErr bool - }{ - { - name: "handles one defined target", - args: args{ - arguments: []string{"build"}, - configJson: &fs.TurboConfigJSON{ - Pipeline: map[string]fs.Pipeline{ - "build": {}, - "test": {}, - "thing#test": {}, - }, - }, - }, - want: []string{"build"}, - wantErr: false, - }, - { - name: "handles multiple targets and ignores flags", - args: args{ - arguments: []string{"build", "test", "--foo", "--bar"}, - configJson: &fs.TurboConfigJSON{ - Pipeline: map[string]fs.Pipeline{ - "build": {}, - "test": {}, - "thing#test": {}, - }, - }, - }, - want: []string{"build", "test"}, - wantErr: false, - }, - { - name: "handles pass through arguments after -- ", - args: args{ - arguments: []string{"build", "test", "--", "--foo", "build", "--cache-dir"}, - configJson: &fs.TurboConfigJSON{ - Pipeline: map[string]fs.Pipeline{ - "build": {}, - "test": {}, - "thing#test": {}, - }, - }, - }, - want: []string{"build", "test"}, - wantErr: false, - }, - { - name: "handles unknown pipeline targets ", - args: args{ - arguments: []string{"foo", "test", "--", "--foo", "build", "--cache-dir"}, - configJson: &fs.TurboConfigJSON{ - Pipeline: map[string]fs.Pipeline{ - "build": {}, - "test": {}, - "thing#test": {}, - }, - }, - }, - want: nil, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := getTargetsFromArguments(tt.args.arguments, tt.args.configJson) - if (err != nil) != tt.wantErr { - t.Errorf("GetTargetsFromArguments() error = %v, wantErr %v", err, tt.wantErr) - return - } - - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("GetTargetsFromArguments() = %v, want %v", got, tt.want) - } - }) - } -} diff --git a/cli/internal/scm/scm.go b/cli/internal/scm/scm.go index 530e120143101..bd658f32c7213 100644 --- a/cli/internal/scm/scm.go +++ b/cli/internal/scm/scm.go @@ -3,10 +3,9 @@ package scm import ( + "errors" "path/filepath" - "github.com/pkg/errors" - "github.com/vercel/turborepo/cli/internal/fs" ) diff --git a/cli/internal/scope/scope.go b/cli/internal/scope/scope.go index bd7ee9434e082..181366214c19e 100644 --- a/cli/internal/scope/scope.go +++ b/cli/internal/scope/scope.go @@ -5,10 +5,10 @@ import ( "strings" "github.com/hashicorp/go-hclog" - "github.com/mitchellh/cli" "github.com/pkg/errors" "github.com/vercel/turborepo/cli/internal/context" "github.com/vercel/turborepo/cli/internal/fs" + "github.com/vercel/turborepo/cli/internal/logger" "github.com/vercel/turborepo/cli/internal/scm" "github.com/vercel/turborepo/cli/internal/ui" "github.com/vercel/turborepo/cli/internal/util" @@ -25,7 +25,7 @@ type Opts struct { GlobalDepPatterns []string } -func ResolvePackages(opts *Opts, scm scm.SCM, ctx *context.Context, tui cli.Ui, logger hclog.Logger) (util.Set, error) { +func ResolvePackages(opts *Opts, scm scm.SCM, ctx *context.Context, logger *logger.Logger, hlogger hclog.Logger) (util.Set, error) { changedFiles, err := getChangedFiles(opts, scm) if err != nil { return nil, err @@ -132,12 +132,12 @@ func ResolvePackages(opts *Opts, scm scm.SCM, ctx *context.Context, tui cli.Ui, // resulting in a bunch of duplicate work as we look for descendents of something // that has already had all of its descendents included. 
for _, pkg := range filteredPkgs { - err = addDependents(filteredPkgs, pkg, ctx, logger) + err = addDependents(filteredPkgs, pkg, ctx, hlogger) if err != nil { return nil, err } } - logger.Debug("running with dependents") + hlogger.Debug("running with dependents") } if includeDependencies { @@ -145,12 +145,12 @@ func ResolvePackages(opts *Opts, scm scm.SCM, ctx *context.Context, tui cli.Ui, // resulting in a bunch of duplicate work as we look for dependencies of something // that has already had all of its dependencies included. for _, pkg := range filteredPkgs { - err = addDependencies(filteredPkgs, pkg, ctx, logger) + err = addDependencies(filteredPkgs, pkg, ctx, hlogger) if err != nil { return nil, err } } - logger.Debug(ui.Dim("running with dependencies")) + hlogger.Debug(ui.Dim("running with dependencies")) } return filteredPkgs, nil } diff --git a/cli/internal/scope/scope_test.go b/cli/internal/scope/scope_test.go index 7244cfa939f1c..82a11788255c0 100644 --- a/cli/internal/scope/scope_test.go +++ b/cli/internal/scope/scope_test.go @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/vercel/turborepo/cli/internal/context" "github.com/vercel/turborepo/cli/internal/fs" - "github.com/vercel/turborepo/cli/internal/ui" + "github.com/vercel/turborepo/cli/internal/logger" "github.com/vercel/turborepo/cli/internal/util" ) @@ -79,8 +79,8 @@ func (m *mockSCM) ChangedFiles(fromCommit string, includeUntracked bool, relativ } func TestResolvePackages(t *testing.T) { - tui := ui.Default() - logger := hclog.Default() + logger := logger.New() + hlogger := hclog.Default() // // app0 - // \ @@ -261,7 +261,7 @@ func TestResolvePackages(t *testing.T) { PackageNames: packageNames, TopologicalGraph: graph, SCC: scc, - }, tui, logger) + }, logger, hlogger) if err != nil { t.Errorf("expected no error, got %v", err) } diff --git a/cli/internal/ui/ui.go b/cli/internal/ui/ui.go index c4c9852ee0579..531dee5c5dac0 100644 --- a/cli/internal/ui/ui.go +++ b/cli/internal/ui/ui.go @@ -9,7 +9,6 @@ import ( "github.com/fatih/color" "github.com/mattn/go-isatty" - "github.com/mitchellh/cli" ) const ansiEscapeStr = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" @@ -62,18 +61,3 @@ func Rainbow(text string) string { return strings.Join(rainbowStr, "") } - -// Default returns the default colored ui -func Default() *cli.ColoredUi { - return &cli.ColoredUi{ - Ui: &cli.BasicUi{ - Reader: os.Stdin, - Writer: os.Stdout, - ErrorWriter: os.Stderr, - }, - OutputColor: cli.UiColorNone, - InfoColor: cli.UiColorNone, - WarnColor: cli.UiColorYellow, - ErrorColor: cli.UiColorRed, - } -} diff --git a/cli/internal/util/printf.go b/cli/internal/util/printf.go index 663434ecf159a..18aa8bdf24a91 100644 --- a/cli/internal/util/printf.go +++ b/cli/internal/util/printf.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "os" + "github.com/vercel/turborepo/cli/internal/ui" ) @@ -21,11 +22,11 @@ func Sprintf(format string, args ...interface{}) string { } func Printf(format string, args ...interface{}) { - fmt.Fprint(os.Stderr, os.Expand(fmt.Sprintf(format, args...), replace)) + Fprintf(os.Stderr, format, args...) 
} func Fprintf(writer io.Writer, format string, args ...interface{}) { - fmt.Fprint(writer, os.Expand(fmt.Sprintf(format, args...), replace)) + fmt.Fprint(writer, Sprintf(format, args...)) } func replace(s string) string { diff --git a/docs/package.json b/docs/package.json index 4c5a719d187bb..f9e5970debc61 100644 --- a/docs/package.json +++ b/docs/package.json @@ -6,7 +6,7 @@ "scripts": { "dev": "next", "start": "next start", - "build": "next build ", + "build": "next build", "lint": "next lint", "schema": "ts-json-schema-generator -p ./schema.d.ts -o ./public/schema.json -t Schema --minify" },
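// Illustrative sketch, not part of this patch: the printf.go hunk above routes
// Printf through Fprintf and Sprintf, which expand ${BOLD}/${RESET}-style
// tokens with os.Expand before writing. The real token table lives in turbo's
// unchanged replace() function; the small mapping below is an assumed subset,
// shown only to demonstrate how os.Expand drives this kind of templating.
package main

import (
	"fmt"
	"os"
)

// expandStyleTokens rewrites ${NAME} tokens into ANSI escape codes.
// Tokens missing from the table expand to the empty string, which is the
// behavior os.Expand gives when the mapping function returns "".
func expandStyleTokens(s string) string {
	codes := map[string]string{
		"BOLD":  "\x1b[1m",
		"GREY":  "\x1b[90m",
		"RESET": "\x1b[0m",
	}
	return os.Expand(s, func(name string) string { return codes[name] })
}

func main() {
	fmt.Println(expandStyleTokens("${BOLD}>>> TURBO${RESET} ${GREY}(1.2s)${RESET}"))
}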