From 0e8c96bd915dda8ad5b9941fcfa92307943e8745 Mon Sep 17 00:00:00 2001 From: sam boyer Date: Sat, 15 Apr 2017 00:58:03 -0400 Subject: [PATCH 1/3] Update gps to v0.16.0 --- Gopkg.lock | 12 +- Gopkg.toml | 2 +- vendor/github.com/sdboyer/constext/LICENSE | 21 + vendor/github.com/sdboyer/constext/README.md | 78 ++ .../github.com/sdboyer/constext/constext.go | 123 +++ .../sdboyer/constext/constext_test.go | 156 ++++ .../sdboyer/gps/_testdata/cmd/echosleep.go | 2 +- .../sdboyer/gps/_testdata/src/skip_/_a.go | 11 + .../sdboyer/gps/_testdata/src/skip_/a.go | 12 + vendor/github.com/sdboyer/gps/bridge.go | 297 +------ vendor/github.com/sdboyer/gps/cmd.go | 45 +- vendor/github.com/sdboyer/gps/cmd_test.go | 12 +- .../github.com/sdboyer/gps/constraint_test.go | 8 +- vendor/github.com/sdboyer/gps/constraints.go | 46 +- vendor/github.com/sdboyer/gps/deduce.go | 349 +++++--- vendor/github.com/sdboyer/gps/deduce_test.go | 50 +- vendor/github.com/sdboyer/gps/example.go | 9 +- .../github.com/sdboyer/gps/filesystem_test.go | 154 ++++ vendor/github.com/sdboyer/gps/glide.lock | 2 + vendor/github.com/sdboyer/gps/hash.go | 6 +- vendor/github.com/sdboyer/gps/hash_test.go | 46 +- vendor/github.com/sdboyer/gps/lock_test.go | 2 +- vendor/github.com/sdboyer/gps/lockdiff.go | 253 ++++++ .../github.com/sdboyer/gps/lockdiff_test.go | 497 ++++++++++++ vendor/github.com/sdboyer/gps/manager_test.go | 526 ++++++------ vendor/github.com/sdboyer/gps/maybe_source.go | 183 +++-- .../github.com/sdboyer/gps/pkgtree/pkgtree.go | 4 + .../sdboyer/gps/pkgtree/pkgtree_test.go | 42 +- vendor/github.com/sdboyer/gps/result_test.go | 14 +- vendor/github.com/sdboyer/gps/rootdata.go | 3 + .../github.com/sdboyer/gps/rootdata_test.go | 8 +- vendor/github.com/sdboyer/gps/satisfy.go | 10 +- vendor/github.com/sdboyer/gps/selection.go | 4 +- .../sdboyer/gps/solve_basic_test.go | 73 +- 
.../sdboyer/gps/solve_bimodal_test.go | 2 +- vendor/github.com/sdboyer/gps/solve_test.go | 31 +- vendor/github.com/sdboyer/gps/solver.go | 35 +- vendor/github.com/sdboyer/gps/source.go | 746 ++++++++++-------- vendor/github.com/sdboyer/gps/source_cache.go | 219 +++++ .../github.com/sdboyer/gps/source_manager.go | 490 +++++------- vendor/github.com/sdboyer/gps/source_test.go | 585 +++----------- vendor/github.com/sdboyer/gps/strip_vendor.go | 26 + .../gps/strip_vendor_nonwindows_test.go | 142 ++++ .../sdboyer/gps/strip_vendor_test.go | 67 ++ .../sdboyer/gps/strip_vendor_windows.go | 41 + .../sdboyer/gps/strip_vendor_windows_test.go | 154 ++++ vendor/github.com/sdboyer/gps/typed_radix.go | 72 -- vendor/github.com/sdboyer/gps/vcs_repo.go | 206 ++--- .../github.com/sdboyer/gps/vcs_repo_test.go | 184 +++-- vendor/github.com/sdboyer/gps/vcs_source.go | 509 ++++-------- .../github.com/sdboyer/gps/vcs_source_test.go | 516 ++++++++++++ vendor/github.com/sdboyer/gps/version.go | 165 ++-- .../github.com/sdboyer/gps/version_queue.go | 6 +- .../sdboyer/gps/version_queue_test.go | 17 +- vendor/github.com/sdboyer/gps/version_test.go | 93 ++- .../github.com/sdboyer/gps/version_unifier.go | 260 ++++++ .../sdboyer/gps/version_unifier_test.go | 138 ++++ 57 files changed, 5065 insertions(+), 2699 deletions(-) create mode 100644 vendor/github.com/sdboyer/constext/LICENSE create mode 100644 vendor/github.com/sdboyer/constext/README.md create mode 100644 vendor/github.com/sdboyer/constext/constext.go create mode 100644 vendor/github.com/sdboyer/constext/constext_test.go create mode 100644 vendor/github.com/sdboyer/gps/_testdata/src/skip_/_a.go create mode 100644 vendor/github.com/sdboyer/gps/_testdata/src/skip_/a.go create mode 100644 vendor/github.com/sdboyer/gps/filesystem_test.go create mode 100644 vendor/github.com/sdboyer/gps/lockdiff.go create mode 100644 
vendor/github.com/sdboyer/gps/lockdiff_test.go create mode 100644 vendor/github.com/sdboyer/gps/source_cache.go create mode 100644 vendor/github.com/sdboyer/gps/strip_vendor.go create mode 100644 vendor/github.com/sdboyer/gps/strip_vendor_nonwindows_test.go create mode 100644 vendor/github.com/sdboyer/gps/strip_vendor_test.go create mode 100644 vendor/github.com/sdboyer/gps/strip_vendor_windows.go create mode 100644 vendor/github.com/sdboyer/gps/strip_vendor_windows_test.go create mode 100644 vendor/github.com/sdboyer/gps/vcs_source_test.go create mode 100644 vendor/github.com/sdboyer/gps/version_unifier.go create mode 100644 vendor/github.com/sdboyer/gps/version_unifier_test.go diff --git a/Gopkg.lock b/Gopkg.lock index 37c5c92044..4bbb9473de 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,4 +1,4 @@ -memo = "2f117dcf1ebe1cff7acbddb851d29b87554d2249f1033f2a4cca3b33dc3fecc2" +memo = "940bdaea844d101260e58623a5bae0392cce009ab34d274e89058b780e880309" [[projects]] branch = "2.x" @@ -36,8 +36,14 @@ memo = "2f117dcf1ebe1cff7acbddb851d29b87554d2249f1033f2a4cca3b33dc3fecc2" revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" +[[projects]] + branch = "master" + name = "github.com/sdboyer/constext" + packages = ["."] + revision = "836a144573533ea4da4e6929c235fd348aed1c80" + [[projects]] name = "github.com/sdboyer/gps" packages = [".","internal","internal/fs","pkgtree"] - revision = "b0f646b744e74543c094023d05339ffb82458e35" - version = "v0.15.0" + revision = "f118745d0aaff02a6a627467fc773716d1df518c" + version = "v0.16.0" diff --git a/Gopkg.toml b/Gopkg.toml index 5c6b8a42f1..a3db92cf25 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -17,4 +17,4 @@ [[dependencies]] name = "github.com/sdboyer/gps" - version = ">=0.15.0, <1.0.0" + version = ">=0.16.0, <1.0.0" diff --git a/vendor/github.com/sdboyer/constext/LICENSE b/vendor/github.com/sdboyer/constext/LICENSE new file mode 100644 index 
0000000000..fdbc31c75c --- /dev/null +++ b/vendor/github.com/sdboyer/constext/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Sam Boyer + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/sdboyer/constext/README.md b/vendor/github.com/sdboyer/constext/README.md new file mode 100644 index 0000000000..e267fd5478 --- /dev/null +++ b/vendor/github.com/sdboyer/constext/README.md @@ -0,0 +1,78 @@ +# constext [![Doc Status](https://godoc.org/github.com/sdboyer/constext?status.png)](https://godoc.org/github.com/sdboyer/constext) + +constext allows you to [`cons`](https://en.wikipedia.org/wiki/Cons) `Context`s +together as a pair, conjoining them for the purpose of all `Context` behaviors: + +1. If either parent context is canceled, the constext is canceled. The + err is set to whatever the err of the parent that was canceled. +2. If either parent has a deadline, the constext uses that same + deadline. 
If both have a deadline, it uses the sooner/lesser one. +3. Values from both parents are unioned together. When a key is present in both + parent trees, the left (first) context supercedes the right (second). + +Paired contexts can be recombined using the standard `context.With*()` +functions. + +## Usage + +Use is simple, and patterned after the `context` package. The `constext.Cons()` +function takes two `context.Context` arguments and returns a single, unified +one, along with a `context.CancelFunc`. + +```go +cctx, cancelFunc := constext.Cons(context.Background(), context.Background()) +``` + +True to the spirit of `cons`, recursive trees can be formed through +nesting: + +```go +bg := context.Background() +cctx := constext.Cons(bg, constext.Cons(bg, constext.Cons(bg, bg))) +``` + +This probably isn't a good idea, but it's possible. + +## Rationale + +While the unary model of context works well for the original vision - an object +operating within an [HTTP] request's scope - there are times when we need a +little more. + +For example: in [dep](https://github.com/golang/dep), the subsystem that +manages interaction with source repositories is called a +[`SourceManager`](https://godoc.org/github.com/sdboyer/gps#SourceManager). It +is a long-lived object; generally, only one is created over the course of any +single `dep` invocation. The `SourceManager` has a number of methods on it that +may initiate network and/or disk interaction. As such, these methods need to +take a `context.Context`, so that the caller can cancel them if needed. + +However, this is not sufficient. The `SourceManager` itself may need to be +terminated (e.g., if the process received a signal). In such a case, in-flight +method calls also need to be canceled, to avoid leaving disk in inconsistent +state. + +As a result, each in-flight request serves two parents - the initator of the +request, and the `SourceManager` itself. 
We can abstract away this complexity +by having a `Context` for each, and `Cons`ing them together on a per-call +basis. + +## Caveats + +_tl;dr: GC doesn't work right, so explicitly cancel constexts when done with them._ + +The stdlib context packages uses internal tree-walking trickery to avoid +spawning goroutines unless it actually has to. We can't rely on that same +trickery, in part because we can't access the tree internals, but also because +it's not so straightforward when multiple parents are involved. Consequently, +`Cons()` almost always must spawn a goroutine to ensure correct cancellation +behavior, whereas e.g. `context.WithCancel()` rarely has to. + +If, as in the use case above, your constext has one short-lived and one +long-lived parent, and the short-lived parent is not explicitly canceled (which +is typical), then until the long-lived parent is canceled, neither the +constext, nor any otherwise-unreachable members of the short-lived context tree +will be GCed. + +So, for now, explicitly cancel your constexts before they go out of scope, +otherwise you'll leak memory. diff --git a/vendor/github.com/sdboyer/constext/constext.go b/vendor/github.com/sdboyer/constext/constext.go new file mode 100644 index 0000000000..09e37709e5 --- /dev/null +++ b/vendor/github.com/sdboyer/constext/constext.go @@ -0,0 +1,123 @@ +// Package constext provides facilities for pairing contexts together so that +// they behave as one. + +package constext + +import ( + "context" + "sync" + "time" +) + +type constext struct { + car, cdr context.Context + done chan struct{} // chan closed on cancelFunc() call, or parent done + once sync.Once // protects cancel func + mu sync.Mutex // protects timer and err + err error // err set on cancel or timeout +} + +// Cons takes two Contexts and combines them into a pair, conjoining their +// behavior: +// +// - If either parent context is canceled, the constext is canceled. 
The err is +// set to whatever the err of the parent that was canceled. +// - If either parent has a deadline, the constext uses that same deadline. If +// both have a deadline, it uses the sooner/lesser one. +// - Values from both parents are unioned together. When a key is present in +// both parent trees, the left (first) context supercedes the right (second). +// +// All the normal context.With*() funcs should incorporate constexts correctly. +// +// If the two parent contexts both return a nil channel from Done() (which can +// occur if both parents are Background, or were created only through +// context.WithValue()), then the returned cancelFunc() is a no-op; calling it +// will NOT result in the termination of any sub-contexts later created. +func Cons(l, r context.Context) (context.Context, context.CancelFunc) { + cc := &constext{ + car: l, + cdr: r, + done: make(chan struct{}), + } + + if cc.car.Done() == nil && cc.cdr.Done() == nil { + // Both parents are un-cancelable, so it's more technically correct to + // return a no-op func here. 
+ return cc, func() {} + } + + if cc.car.Err() != nil { + cc.cancel(cc.car.Err()) + return cc, func() {} + } + if cc.cdr.Err() != nil { + cc.cancel(cc.cdr.Err()) + return cc, func() {} + } + + go func() { + select { + case <-cc.car.Done(): + cc.cancel(cc.car.Err()) + case <-cc.cdr.Done(): + cc.cancel(cc.cdr.Err()) + case <-cc.done: + // Ensure the goroutine dies when canceled + } + }() + + return cc, func() { cc.cancel(context.Canceled) } +} + +func (cc *constext) cancel(err error) { + cc.once.Do(func() { + if err == nil { + panic("constext: internal error: missing cancel error") + } + + cc.mu.Lock() + if cc.err == nil { + cc.err = err + close(cc.done) + } + cc.mu.Unlock() + }) +} + +func (cc *constext) Deadline() (time.Time, bool) { + hdeadline, hok := cc.car.Deadline() + tdeadline, tok := cc.cdr.Deadline() + if !hok && !tok { + return time.Time{}, false + } + + if hok && !tok { + return hdeadline, true + } + if !hok && tok { + return tdeadline, true + } + + if hdeadline.Before(tdeadline) { + return hdeadline, true + } + return tdeadline, true +} + +func (cc *constext) Done() <-chan struct{} { + return cc.done +} + +func (cc *constext) Err() error { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.err +} + +func (cc *constext) Value(key interface{}) interface{} { + v := cc.car.Value(key) + if v != nil { + return v + } + return cc.cdr.Value(key) +} diff --git a/vendor/github.com/sdboyer/constext/constext_test.go b/vendor/github.com/sdboyer/constext/constext_test.go new file mode 100644 index 0000000000..907483b218 --- /dev/null +++ b/vendor/github.com/sdboyer/constext/constext_test.go @@ -0,0 +1,156 @@ +package constext + +import ( + "context" + "runtime" + "testing" + "time" +) + +var bgc = context.Background() + +func TestConsCancel(t *testing.T) { + c1, cancel1 := context.WithCancel(bgc) + c2, cancel2 := context.WithCancel(bgc) + + cc, _ := Cons(c1, c2) + if _, has := cc.Deadline(); has { + t.Fatal("constext should not have a deadline if parents 
do not") + } + + cancel1() + select { + case <-cc.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for parent to quit; stacks:\n%s", buf[:n]) + } + + cc, _ = Cons(c1, c2) + if cc.Err() == nil { + t.Fatal("pre-canceled car constext did not begin canceled") + } + + cc, _ = Cons(c2, c1) + if cc.Err() == nil { + t.Fatal("pre-canceled cdr constext did not begin canceled") + } + + c3, _ := context.WithCancel(bgc) + cc, _ = Cons(c3, c2) + cancel2() + select { + case <-cc.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cdr to quit; stacks:\n%s", buf[:n]) + } +} + +func TestCancelPassdown(t *testing.T) { + c1, cancel1 := context.WithCancel(bgc) + c2, _ := context.WithCancel(bgc) + cc, _ := Cons(c1, c2) + c3, _ := context.WithCancel(cc) + + cancel1() + select { + case <-c3.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for parent to quit; stacks:\n%s", buf[:n]) + } + + c1, cancel1 = context.WithCancel(bgc) + cc, _ = Cons(c1, c2) + c3 = context.WithValue(cc, "foo", "bar") + + cancel1() + select { + case <-c3.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for parent to quit; stacks:\n%s", buf[:n]) + } +} + +func TestValueUnion(t *testing.T) { + c1 := context.WithValue(bgc, "foo", "bar") + c2 := context.WithValue(bgc, "foo", "baz") + cc, _ := Cons(c1, c2) + + v := cc.Value("foo") + if v != "bar" { + t.Fatalf("wanted value of \"foo\" from car, \"bar\", got %q", v) + } + + c3 := context.WithValue(bgc, "bar", "quux") + cc2, _ := Cons(c1, c3) + v = cc2.Value("bar") + if v != "quux" { + t.Fatalf("wanted value from cdr, \"quux\", got %q", v) + } + + cc, _ = Cons(cc, c3) + v = cc.Value("bar") + if v != "quux" { + 
t.Fatalf("wanted value from nested cdr, \"quux\", got %q", v) + } +} + +func TestDeadline(t *testing.T) { + t1 := time.Now().Add(1 * time.Second) + c1, _ := context.WithDeadline(bgc, t1) + cc, _ := Cons(c1, bgc) + + cct, ok := cc.Deadline() + if !ok { + t.Fatal("constext claimed to not have any deadline, but car did") + } + if cct != t1 { + t.Fatal("constext did not have correct deadline") + } + + cc, _ = Cons(bgc, c1) + cct, ok = cc.Deadline() + if !ok { + t.Fatal("constext claimed to not have any deadline, but cdr did") + } + if cct != t1 { + t.Fatal("constext did not have correct deadline") + } + + t2 := time.Now().Add(1 * time.Second) + c2, _ := context.WithDeadline(bgc, t2) + cc, _ = Cons(c1, c2) + cct, ok = cc.Deadline() + if !ok { + t.Fatal("constext claimed to not have any deadline, but both parents did") + } + + if cct != t1 { + t.Fatal("got wrong deadline time back") + } + + cc, _ = Cons(c2, c1) + cct, ok = cc.Deadline() + if !ok { + t.Fatal("constext claimed to not have any deadline, but both parents did") + } + + if cct != t1 { + t.Fatal("got wrong deadline time back") + } + + select { + case <-cc.Done(): + case <-time.After(t1.Sub(time.Now()) + 5*time.Millisecond): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("car did not quit after deadline; stacks:\n%s", buf[:n]) + } +} diff --git a/vendor/github.com/sdboyer/gps/_testdata/cmd/echosleep.go b/vendor/github.com/sdboyer/gps/_testdata/cmd/echosleep.go index b6a1998d67..8c34ce3585 100644 --- a/vendor/github.com/sdboyer/gps/_testdata/cmd/echosleep.go +++ b/vendor/github.com/sdboyer/gps/_testdata/cmd/echosleep.go @@ -12,6 +12,6 @@ func main() { for i := 0; i < *n; i++ { fmt.Println("foo") - time.Sleep(time.Duration(i) * 100 * time.Millisecond) + time.Sleep(time.Duration(i) * 250 * time.Millisecond) } } diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/skip_/_a.go b/vendor/github.com/sdboyer/gps/_testdata/src/skip_/_a.go new file mode 100644 
index 0000000000..1e13b2cc24 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/_testdata/src/skip_/_a.go @@ -0,0 +1,11 @@ +package skip + +import ( + "bytes" + "sort" +) + +var ( + _ = sort.Strings + _ = bytes.Buffer +) diff --git a/vendor/github.com/sdboyer/gps/_testdata/src/skip_/a.go b/vendor/github.com/sdboyer/gps/_testdata/src/skip_/a.go new file mode 100644 index 0000000000..ffc88f4cb8 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/_testdata/src/skip_/a.go @@ -0,0 +1,12 @@ +package skip + +import ( + "sort" + + "github.com/sdboyer/gps" +) + +var ( + _ = sort.Strings + _ = gps.Solve +) diff --git a/vendor/github.com/sdboyer/gps/bridge.go b/vendor/github.com/sdboyer/gps/bridge.go index ded26eee2e..8ee24f85f2 100644 --- a/vendor/github.com/sdboyer/gps/bridge.go +++ b/vendor/github.com/sdboyer/gps/bridge.go @@ -9,17 +9,29 @@ import ( "github.com/sdboyer/gps/pkgtree" ) -// sourceBridges provide an adapter to SourceManagers that tailor operations -// for a single solve run. +// sourceBridge is an adapter to SourceManagers that tailor operations for a +// single solve run. type sourceBridge interface { - SourceManager // composes SourceManager + // sourceBridge includes all the methods in the SourceManager interface except + // for Release(). 
+ SourceExists(ProjectIdentifier) (bool, error) + SyncSourceFor(ProjectIdentifier) error + RevisionPresentIn(ProjectIdentifier, Revision) (bool, error) + ListPackages(ProjectIdentifier, Version) (pkgtree.PackageTree, error) + GetManifestAndLock(ProjectIdentifier, Version, ProjectAnalyzer) (Manifest, Lock, error) + ExportProject(ProjectIdentifier, Version, string) error + DeduceProjectRoot(ip string) (ProjectRoot, error) + + //sourceExists(ProjectIdentifier) (bool, error) + //syncSourceFor(ProjectIdentifier) error + listVersions(ProjectIdentifier) ([]Version, error) + //revisionPresentIn(ProjectIdentifier, Revision) (bool, error) + //listPackages(ProjectIdentifier, Version) (pkgtree.PackageTree, error) + //getManifestAndLock(ProjectIdentifier, Version, ProjectAnalyzer) (Manifest, Lock, error) + //exportProject(ProjectIdentifier, Version, string) error + //deduceProjectRoot(ip string) (ProjectRoot, error) verifyRootDir(path string) error - pairRevision(id ProjectIdentifier, r Revision) []Version - pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion - vendorCodeExists(id ProjectIdentifier) (bool, error) - matches(id ProjectIdentifier, c Constraint, v Version) bool - matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool - intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint + vendorCodeExists(ProjectIdentifier) (bool, error) breakLock() } @@ -40,9 +52,6 @@ type bridge struct { // held by the solver that it ends up being easier and saner to do this. s *solver - // Whether to sort version lists for downgrade. - down bool - // Simple, local cache of the root's PackageTree crp *struct { ptree pkgtree.PackageTree @@ -52,11 +61,14 @@ type bridge struct { // Map of project root name to their available version list. This cache is // layered on top of the proper SourceManager's cache; the only difference // is that this keeps the versions sorted in the direction required by the - // current solve run + // current solve run. 
vlists map[ProjectIdentifier][]Version // Indicates whether lock breaking has already been run lockbroken int32 + + // Whether to sort version lists for downgrade. + down bool } // Global factory func to create a bridge. This exists solely to allow tests to @@ -70,34 +82,30 @@ var mkBridge = func(s *solver, sm SourceManager, down bool) sourceBridge { } } -func (b *bridge) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) { +func (b *bridge) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) { if b.s.rd.isRoot(id.ProjectRoot) { return b.s.rd.rm, b.s.rd.rl, nil } b.s.mtr.push("b-gmal") - m, l, e := b.sm.GetManifestAndLock(id, v) + m, l, e := b.sm.GetManifestAndLock(id, v, an) b.s.mtr.pop() return m, l, e } -func (b *bridge) AnalyzerInfo() (string, int) { - return b.sm.AnalyzerInfo() -} - -func (b *bridge) ListVersions(id ProjectIdentifier) ([]Version, error) { +func (b *bridge) listVersions(id ProjectIdentifier) ([]Version, error) { if vl, exists := b.vlists[id]; exists { return vl, nil } b.s.mtr.push("b-list-versions") - vl, err := b.sm.ListVersions(id) - // TODO(sdboyer) cache errors, too? 
+ pvl, err := b.sm.ListVersions(id) if err != nil { b.s.mtr.pop() return nil, err } + vl := hidePair(pvl) if b.down { SortForDowngrade(vl) } else { @@ -134,149 +142,6 @@ func (b *bridge) vendorCodeExists(id ProjectIdentifier) (bool, error) { return false, nil } -func (b *bridge) pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion { - vl, err := b.ListVersions(id) - if err != nil { - return nil - } - - b.s.mtr.push("b-pair-version") - // doing it like this is a bit sloppy - for _, v2 := range vl { - if p, ok := v2.(PairedVersion); ok { - if p.Matches(v) { - b.s.mtr.pop() - return p - } - } - } - - b.s.mtr.pop() - return nil -} - -func (b *bridge) pairRevision(id ProjectIdentifier, r Revision) []Version { - vl, err := b.ListVersions(id) - if err != nil { - return nil - } - - b.s.mtr.push("b-pair-rev") - p := []Version{r} - // doing it like this is a bit sloppy - for _, v2 := range vl { - if pv, ok := v2.(PairedVersion); ok { - if pv.Matches(r) { - p = append(p, pv) - } - } - } - - b.s.mtr.pop() - return p -} - -// matches performs a typical match check between the provided version and -// constraint. If that basic check fails and the provided version is incomplete -// (e.g. an unpaired version or bare revision), it will attempt to gather more -// information on one or the other and re-perform the comparison. -func (b *bridge) matches(id ProjectIdentifier, c Constraint, v Version) bool { - if c.Matches(v) { - return true - } - - b.s.mtr.push("b-matches") - // This approach is slightly wasteful, but just SO much less verbose, and - // more easily understood. - vtu := b.vtu(id, v) - - var uc Constraint - if cv, ok := c.(Version); ok { - uc = b.vtu(id, cv) - } else { - uc = c - } - - b.s.mtr.pop() - return uc.Matches(vtu) -} - -// matchesAny is the authoritative version of Constraint.MatchesAny. 
-func (b *bridge) matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool { - if c1.MatchesAny(c2) { - return true - } - - b.s.mtr.push("b-matches-any") - // This approach is slightly wasteful, but just SO much less verbose, and - // more easily understood. - var uc1, uc2 Constraint - if v1, ok := c1.(Version); ok { - uc1 = b.vtu(id, v1) - } else { - uc1 = c1 - } - - if v2, ok := c2.(Version); ok { - uc2 = b.vtu(id, v2) - } else { - uc2 = c2 - } - - b.s.mtr.pop() - return uc1.MatchesAny(uc2) -} - -// intersect is the authoritative version of Constraint.Intersect. -func (b *bridge) intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint { - rc := c1.Intersect(c2) - if rc != none { - return rc - } - - b.s.mtr.push("b-intersect") - // This approach is slightly wasteful, but just SO much less verbose, and - // more easily understood. - var uc1, uc2 Constraint - if v1, ok := c1.(Version); ok { - uc1 = b.vtu(id, v1) - } else { - uc1 = c1 - } - - if v2, ok := c2.(Version); ok { - uc2 = b.vtu(id, v2) - } else { - uc2 = c2 - } - - b.s.mtr.pop() - return uc1.Intersect(uc2) -} - -// vtu creates a versionTypeUnion for the provided version. -// -// This union may (and typically will) end up being nothing more than the single -// input version, but creating a versionTypeUnion guarantees that 'local' -// constraint checks (direct method calls) are authoritative. -func (b *bridge) vtu(id ProjectIdentifier, v Version) versionTypeUnion { - switch tv := v.(type) { - case Revision: - return versionTypeUnion(b.pairRevision(id, tv)) - case PairedVersion: - return versionTypeUnion(b.pairRevision(id, tv.Underlying())) - case UnpairedVersion: - pv := b.pairVersion(id, tv) - if pv == nil { - return versionTypeUnion{tv} - } - - return versionTypeUnion(b.pairRevision(id, pv.Underlying())) - } - - return nil -} - // listPackages lists all the packages contained within the given project at a // particular version. 
// @@ -352,105 +217,3 @@ func (b *bridge) SyncSourceFor(id ProjectIdentifier) error { // by the solver, and the metrics design is for wall time on a single thread return b.sm.SyncSourceFor(id) } - -// versionTypeUnion represents a set of versions that are, within the scope of -// this solver run, equivalent. -// -// The simple case here is just a pair - a normal version plus its underlying -// revision - but if a tag or branch point at the same rev, then we consider -// them equivalent. Again, however, this equivalency is short-lived; it must be -// re-assessed during every solver run. -// -// The union members are treated as being OR'd together: all constraint -// operations attempt each member, and will take the most open/optimistic -// answer. -// -// This technically does allow tags to match branches - something we otherwise -// try hard to avoid - but because the original input constraint never actually -// changes (and is never written out in the Solution), there's no harmful case -// of a user suddenly riding a branch when they expected a fixed tag. -type versionTypeUnion []Version - -// This should generally not be called, but is required for the interface. If it -// is called, we have a bigger problem (the type has escaped the solver); thus, -// panic. -func (vtu versionTypeUnion) String() string { - panic("versionTypeUnion should never be turned into a string; it is solver internal-only") -} - -// This should generally not be called, but is required for the interface. If it -// is called, we have a bigger problem (the type has escaped the solver); thus, -// panic. -func (vtu versionTypeUnion) Type() VersionType { - panic("versionTypeUnion should never need to answer a Type() call; it is solver internal-only") -} - -// Matches takes a version, and returns true if that version matches any version -// contained in the union. -// -// This DOES allow tags to match branches, albeit indirectly through a revision. 
-func (vtu versionTypeUnion) Matches(v Version) bool { - vtu2, otherIs := v.(versionTypeUnion) - - for _, v1 := range vtu { - if otherIs { - for _, v2 := range vtu2 { - if v1.Matches(v2) { - return true - } - } - } else if v1.Matches(v) { - return true - } - } - - return false -} - -// MatchesAny returns true if any of the contained versions (which are also -// constraints) in the union successfully MatchAny with the provided -// constraint. -func (vtu versionTypeUnion) MatchesAny(c Constraint) bool { - vtu2, otherIs := c.(versionTypeUnion) - - for _, v1 := range vtu { - if otherIs { - for _, v2 := range vtu2 { - if v1.MatchesAny(v2) { - return true - } - } - } else if v1.MatchesAny(c) { - return true - } - } - - return false -} - -// Intersect takes a constraint, and attempts to intersect it with all the -// versions contained in the union until one returns non-none. If that never -// happens, then none is returned. -// -// In order to avoid weird version floating elsewhere in the solver, the union -// always returns the input constraint. (This is probably obviously correct, but -// is still worth noting.) 
-func (vtu versionTypeUnion) Intersect(c Constraint) Constraint { - vtu2, otherIs := c.(versionTypeUnion) - - for _, v1 := range vtu { - if otherIs { - for _, v2 := range vtu2 { - if rc := v1.Intersect(v2); rc != none { - return rc - } - } - } else if rc := v1.Intersect(c); rc != none { - return rc - } - } - - return none -} - -func (vtu versionTypeUnion) _private() {} diff --git a/vendor/github.com/sdboyer/gps/cmd.go b/vendor/github.com/sdboyer/gps/cmd.go index eabda0f994..ca0e7c3f31 100644 --- a/vendor/github.com/sdboyer/gps/cmd.go +++ b/vendor/github.com/sdboyer/gps/cmd.go @@ -2,6 +2,7 @@ package gps import ( "bytes" + "context" "fmt" "os/exec" "sync" @@ -11,27 +12,36 @@ import ( ) // monitoredCmd wraps a cmd and will keep monitoring the process until it -// finishes or a certain amount of time has passed and the command showed -// no signs of activity. +// finishes, the provided context is canceled, or a certain amount of time has +// passed and the command showed no signs of activity. type monitoredCmd struct { cmd *exec.Cmd timeout time.Duration + ctx context.Context stdout *activityBuffer stderr *activityBuffer } func newMonitoredCmd(cmd *exec.Cmd, timeout time.Duration) *monitoredCmd { - stdout := newActivityBuffer() - stderr := newActivityBuffer() - cmd.Stderr = stderr - cmd.Stdout = stdout - return &monitoredCmd{cmd, timeout, stdout, stderr} + stdout, stderr := newActivityBuffer(), newActivityBuffer() + cmd.Stdout, cmd.Stderr = stdout, stderr + return &monitoredCmd{ + cmd: cmd, + timeout: timeout, + stdout: stdout, + stderr: stderr, + } } // run will wait for the command to finish and return the error, if any. If the // command does not show any activity for more than the specified timeout the // process will be killed. 
-func (c *monitoredCmd) run() error { +func (c *monitoredCmd) run(ctx context.Context) error { + // Check for cancellation before even starting + if ctx.Err() != nil { + return ctx.Err() + } + ticker := time.NewTicker(c.timeout) done := make(chan error, 1) defer ticker.Stop() @@ -52,6 +62,11 @@ func (c *monitoredCmd) run() error { return &timeoutError{c.timeout} } + case <-ctx.Done(): + if err := c.cmd.Process.Kill(); err != nil { + return &killCmdError{err} + } + return c.ctx.Err() case err := <-done: return err } @@ -64,8 +79,8 @@ func (c *monitoredCmd) hasTimedOut() bool { c.stdout.lastActivity().Before(t) } -func (c *monitoredCmd) combinedOutput() ([]byte, error) { - if err := c.run(); err != nil { +func (c *monitoredCmd) combinedOutput(ctx context.Context) ([]byte, error) { + if err := c.run(ctx); err != nil { return c.stderr.buf.Bytes(), err } @@ -112,15 +127,15 @@ type killCmdError struct { } func (e killCmdError) Error() string { - return fmt.Sprintf("error killing command after timeout: %s", e.err) + return fmt.Sprintf("error killing command: %s", e.err) } -func runFromCwd(cmd string, args ...string) ([]byte, error) { +func runFromCwd(ctx context.Context, cmd string, args ...string) ([]byte, error) { c := newMonitoredCmd(exec.Command(cmd, args...), 2*time.Minute) - return c.combinedOutput() + return c.combinedOutput(ctx) } -func runFromRepoDir(repo vcs.Repo, cmd string, args ...string) ([]byte, error) { +func runFromRepoDir(ctx context.Context, repo vcs.Repo, cmd string, args ...string) ([]byte, error) { c := newMonitoredCmd(repo.CmdFromDir(cmd, args...), 2*time.Minute) - return c.combinedOutput() + return c.combinedOutput(ctx) } diff --git a/vendor/github.com/sdboyer/gps/cmd_test.go b/vendor/github.com/sdboyer/gps/cmd_test.go index 9434aba7bc..70ffa0ef58 100644 --- a/vendor/github.com/sdboyer/gps/cmd_test.go +++ b/vendor/github.com/sdboyer/gps/cmd_test.go @@ -1,6 +1,7 @@ package gps import ( + "context" "fmt" "os" "os/exec" @@ -11,11 
+12,16 @@ import ( func mkTestCmd(iterations int) *monitoredCmd { return newMonitoredCmd( exec.Command("./echosleep", "-n", fmt.Sprint(iterations)), - 200*time.Millisecond, + 500*time.Millisecond, ) } func TestMonitoredCmd(t *testing.T) { + // Sleeps make this a bit slow + if testing.Short() { + t.Skip("skipping test with sleeps on short") + } + err := exec.Command("go", "build", "./_testdata/cmd/echosleep.go").Run() if err != nil { t.Errorf("Unable to build echosleep binary: %s", err) @@ -23,7 +29,7 @@ func TestMonitoredCmd(t *testing.T) { defer os.Remove("./echosleep") cmd := mkTestCmd(2) - err = cmd.run() + err = cmd.run(context.Background()) if err != nil { t.Errorf("Expected command not to fail: %s", err) } @@ -34,7 +40,7 @@ func TestMonitoredCmd(t *testing.T) { } cmd2 := mkTestCmd(10) - err = cmd2.run() + err = cmd2.run(context.Background()) if err == nil { t.Error("Expected command to fail") } diff --git a/vendor/github.com/sdboyer/gps/constraint_test.go b/vendor/github.com/sdboyer/gps/constraint_test.go index 16f54b9de9..fe301af47f 100644 --- a/vendor/github.com/sdboyer/gps/constraint_test.go +++ b/vendor/github.com/sdboyer/gps/constraint_test.go @@ -590,8 +590,7 @@ func TestSemverConstraintOps(t *testing.T) { // still an incomparable type c1, err := NewSemverConstraint("=1.0.0") if err != nil { - t.Errorf("Failed to create constraint: %s", err) - t.FailNow() + t.Fatalf("Failed to create constraint: %s", err) } if !c1.MatchesAny(any) { @@ -610,8 +609,7 @@ func TestSemverConstraintOps(t *testing.T) { c1, err = NewSemverConstraint(">= 1.0.0") if err != nil { - t.Errorf("Failed to create constraint: %s", err) - t.FailNow() + t.Fatalf("Failed to create constraint: %s", err) } if c1.Matches(v1) { @@ -897,7 +895,7 @@ func TestTypedConstraintString(t *testing.T) { } for _, fix := range table { - got := typedConstraintString(fix.in) + got := fix.in.typedString() if got != fix.out { t.Errorf("Typed string for %v (%T) was not expected %q; got %q", 
fix.in, fix.in, fix.out, got) } diff --git a/vendor/github.com/sdboyer/gps/constraints.go b/vendor/github.com/sdboyer/gps/constraints.go index 07de60a444..0af6975f6f 100644 --- a/vendor/github.com/sdboyer/gps/constraints.go +++ b/vendor/github.com/sdboyer/gps/constraints.go @@ -20,42 +20,28 @@ var ( // magic to operate. type Constraint interface { fmt.Stringer + // Matches indicates if the provided Version is allowed by the Constraint. Matches(Version) bool + // MatchesAny indicates if the intersection of the Constraint with the // provided Constraint would yield a Constraint that could allow *any* // Version. MatchesAny(Constraint) bool + // Intersect computes the intersection of the Constraint with the provided // Constraint. Intersect(Constraint) Constraint - _private() -} - -// typedConstraintString emits the normal stringified representation of the -// provided constraint, prefixed with a string that uniquely identifies the type -// of the constraint. -func typedConstraintString(c Constraint) string { - var prefix string - - switch tc := c.(type) { - case Version: - return typedVersionString(tc) - case semverConstraint: - prefix = "svc" - case anyConstraint: - prefix = "any" - case noneConstraint: - prefix = "none" - } - return fmt.Sprintf("%s-%s", prefix, c.String()) + // typedString emits the normal stringified representation of the provided + // constraint, prefixed with a string that uniquely identifies the type of + // the constraint. + // + // It also forces Constraint to be a private/sealed interface, which is a + // design goal of the system. + typedString() string } -func (semverConstraint) _private() {} -func (anyConstraint) _private() {} -func (noneConstraint) _private() {} - // NewSemverConstraint attempts to construct a semver Constraint object from the // input string. 
// @@ -82,6 +68,10 @@ func (c semverConstraint) String() string { return c.c.String() } +func (c semverConstraint) typedString() string { + return fmt.Sprintf("svc-%s", c.c.String()) +} + func (c semverConstraint) Matches(v Version) bool { switch tv := v.(type) { case versionTypeUnion: @@ -159,6 +149,10 @@ func (anyConstraint) String() string { return "*" } +func (anyConstraint) typedString() string { + return "any-*" +} + func (anyConstraint) Matches(Version) bool { return true } @@ -179,6 +173,10 @@ func (noneConstraint) String() string { return "" } +func (noneConstraint) typedString() string { + return "none-" +} + func (noneConstraint) Matches(Version) bool { return false } diff --git a/vendor/github.com/sdboyer/gps/deduce.go b/vendor/github.com/sdboyer/gps/deduce.go index b14b16f77d..a105bb59b8 100644 --- a/vendor/github.com/sdboyer/gps/deduce.go +++ b/vendor/github.com/sdboyer/gps/deduce.go @@ -1,6 +1,8 @@ package gps import ( + "context" + "errors" "fmt" "io" "net/http" @@ -9,6 +11,9 @@ import ( "regexp" "strconv" "strings" + "sync" + + radix "github.com/armon/go-radix" ) var ( @@ -519,76 +524,147 @@ func (m vcsExtensionDeducer) deduceSource(path string, u *url.URL) (maybeSource, } } -type stringFuture func() (string, error) -type sourceFuture func() (source, string, error) -type partialSourceFuture func(string, ProjectAnalyzer) sourceFuture +// A deducer takes an import path and inspects it to determine where the +// corresponding project root should be. It applies a number of matching +// techniques, eventually falling back to an HTTP request for go-get metadata if +// none of the explicit rules succeed. +// +// The only real implementation is deductionCoordinator. The interface is +// primarily intended for testing purposes. 
+type deducer interface { + deduceRootPath(ctx context.Context, path string) (pathDeduction, error) +} + +type deductionCoordinator struct { + suprvsr *supervisor + mut sync.RWMutex + rootxt *radix.Tree + deducext *deducerTrie +} + +func newDeductionCoordinator(superv *supervisor) *deductionCoordinator { + dc := &deductionCoordinator{ + suprvsr: superv, + rootxt: radix.New(), + deducext: pathDeducerTrie(), + } -type deductionFuture struct { - // rslow indicates that the root future may be a slow call (that it has to - // hit the network for some reason) - rslow bool - root stringFuture - psf partialSourceFuture + return dc } -// deduceFromPath takes an import path and attempts to deduce various +// deduceRootPath takes an import path and attempts to deduce various // metadata about it - what type of source should handle it, and where its // "root" is (for vcs repositories, the repository root). // -// The results are wrapped in futures, as most of these operations require at -// least some network activity to complete. For the first return value, network -// activity will be triggered when the future is called. For the second, -// network activity is triggered only when calling the sourceFuture returned -// from the partialSourceFuture. -func (sm *SourceMgr) deduceFromPath(path string) (deductionFuture, error) { - opath := path - u, path, err := normalizeURI(path) - if err != nil { - return deductionFuture{}, err +// If no errors are encountered, the returned pathDeduction will contain both +// the root path and a list of maybeSources, which can be subsequently used to +// create a handler that will manage the particular source. 
+func (dc *deductionCoordinator) deduceRootPath(ctx context.Context, path string) (pathDeduction, error) { + if dc.suprvsr.getLifetimeContext().Err() != nil { + return pathDeduction{}, errors.New("deductionCoordinator has been terminated") + } + + // First, check the rootxt to see if there's a prefix match - if so, we + // can return that and move on. + dc.mut.RLock() + prefix, data, has := dc.rootxt.LongestPrefix(path) + dc.mut.RUnlock() + if has && isPathPrefixOrEqual(prefix, path) { + switch d := data.(type) { + case maybeSource: + return pathDeduction{root: prefix, mb: d}, nil + case *httpMetadataDeducer: + // Multiple calls have come in for a similar path shape during + // the window in which the HTTP request to retrieve go get + // metadata is in flight. Fold this request in with the existing + // one(s) by calling the deduction method, which will avoid + // duplication of work through a sync.Once. + return d.deduce(ctx, path) + } + + panic(fmt.Sprintf("unexpected %T in deductionCoordinator.rootxt: %v", data, data)) } - // Helpers to futurize the results from deducers - strfut := func(s string) stringFuture { - return func() (string, error) { - return s, nil - } + // No match. Try known path deduction first. + pd, err := dc.deduceKnownPaths(path) + if err == nil { + // Deduction worked; store it in the rootxt, send on retchan and + // terminate. + // FIXME(sdboyer) deal with changing path vs. root. Probably needs + // to be predeclared and reused in the hmd returnFunc + dc.mut.Lock() + dc.rootxt.Insert(pd.root, pd.mb) + dc.mut.Unlock() + return pd, nil + } + + if err != errNoKnownPathMatch { + return pathDeduction{}, err + } + + // The err indicates no known path matched. It's still possible that + // retrieving go get metadata might do the trick. + hmd := &httpMetadataDeducer{ + basePath: path, + suprvsr: dc.suprvsr, + // The vanity deducer will call this func with a completed + // pathDeduction if it succeeds in finding one. 
We process it + // back through the action channel to ensure serialized + // access to the rootxt map. + returnFunc: func(pd pathDeduction) { + dc.mut.Lock() + dc.rootxt.Insert(pd.root, pd.mb) + + if pd.root != path { + // Replace the vanity deducer with a real result set, so + // that subsequent deductions don't hit the network + // again. + dc.rootxt.Insert(path, pd.mb) + } + dc.mut.Unlock() + }, } - srcfut := func(mb maybeSource) partialSourceFuture { - return func(cachedir string, an ProjectAnalyzer) sourceFuture { - var src source - var ident string - var err error + // Save the hmd in the rootxt so that calls checking on similar + // paths made while the request is in flight can be folded together. + dc.mut.Lock() + dc.rootxt.Insert(path, hmd) + dc.mut.Unlock() + + // Trigger the HTTP-backed deduction process for this requestor. + return hmd.deduce(ctx, path) +} - c := make(chan struct{}, 1) - go func() { - defer close(c) - src, ident, err = mb.try(cachedir, an) - }() +// pathDeduction represents the results of a successful import path deduction - +// a root path, plus a maybeSource that can be used to attempt to connect to +// the source. 
+type pathDeduction struct { + root string + mb maybeSource +} - return func() (source, string, error) { - <-c - return src, ident, err - } - } +var errNoKnownPathMatch = errors.New("no known path match") + +func (dc *deductionCoordinator) deduceKnownPaths(path string) (pathDeduction, error) { + u, path, err := normalizeURI(path) + if err != nil { + return pathDeduction{}, err } // First, try the root path-based matches - if _, mtchi, has := sm.dxt.LongestPrefix(path); has { - mtch := mtchi.(pathDeducer) + if _, mtch, has := dc.deducext.LongestPrefix(path); has { root, err := mtch.deduceRoot(path) if err != nil { - return deductionFuture{}, err + return pathDeduction{}, err } mb, err := mtch.deduceSource(path, u) if err != nil { - return deductionFuture{}, err + return pathDeduction{}, err } - return deductionFuture{ - rslow: false, - root: strfut(root), - psf: srcfut(mb), + return pathDeduction{ + root: root, + mb: mb, }, nil } @@ -597,96 +673,99 @@ func (sm *SourceMgr) deduceFromPath(path string) (deductionFuture, error) { if root, err := exm.deduceRoot(path); err == nil { mb, err := exm.deduceSource(path, u) if err != nil { - return deductionFuture{}, err + return pathDeduction{}, err } - return deductionFuture{ - rslow: false, - root: strfut(root), - psf: srcfut(mb), + return pathDeduction{ + root: root, + mb: mb, }, nil } - // No luck so far. maybe it's one of them vanity imports? 
- // We have to get a little fancier for the metadata lookup by chaining the - // source future onto the metadata future - - // Declare these out here so they're available for the source future - var vcs string - var ru *url.URL - - // Kick off the vanity metadata fetch - var importroot string - var futerr error - c := make(chan struct{}, 1) - go func() { - defer close(c) - var reporoot string - importroot, vcs, reporoot, futerr = parseMetadata(path) - if futerr != nil { - futerr = fmt.Errorf("unable to deduce repository and source type for: %q", opath) + return pathDeduction{}, errNoKnownPathMatch +} + +type httpMetadataDeducer struct { + once sync.Once + deduced pathDeduction + deduceErr error + basePath string + returnFunc func(pathDeduction) + suprvsr *supervisor +} + +func (hmd *httpMetadataDeducer) deduce(ctx context.Context, path string) (pathDeduction, error) { + hmd.once.Do(func() { + opath := path + u, path, err := normalizeURI(path) + if err != nil { + hmd.deduceErr = err return } + pd := pathDeduction{} + + // Make the HTTP call to attempt to retrieve go-get metadata + var root, vcs, reporoot string + err = hmd.suprvsr.do(ctx, path, ctHTTPMetadata, func(ctx context.Context) error { + root, vcs, reporoot, err = parseMetadata(ctx, path, u.Scheme) + return err + }) + if err != nil { + hmd.deduceErr = fmt.Errorf("unable to deduce repository and source type for: %q", opath) + return + } + pd.root = root + // If we got something back at all, then it supersedes the actual input for // the real URL to hit - ru, futerr = url.Parse(reporoot) - if futerr != nil { - futerr = fmt.Errorf("server returned bad URL when searching for vanity import: %q", reporoot) - importroot = "" + repoURL, err := url.Parse(reporoot) + if err != nil { + hmd.deduceErr = fmt.Errorf("server returned bad URL in go-get metadata: %q", reporoot) return } - }() - // Set up the root func to catch the result - root := func() (string, error) { - <-c - return importroot, futerr - } - - src := 
func(cachedir string, an ProjectAnalyzer) sourceFuture { - var src source - var ident string - var err error - - c := make(chan struct{}, 1) - go func() { - defer close(c) - // make sure the metadata future is finished (without errors), thus - // guaranteeing that ru and vcs will be populated - _, err = root() - if err != nil { + // If the input path specified a scheme, then try to honor it. + if u.Scheme != "" && repoURL.Scheme != u.Scheme { + // If the input scheme was http, but the go-get metadata + // nevertheless indicated https should be used for the repo, then + // trust the metadata and use https. + // + // To err on the secure side, do NOT allow the same in the other + // direction (https -> http). + if u.Scheme != "http" || repoURL.Scheme != "https" { + hmd.deduceErr = fmt.Errorf("scheme mismatch for %q: input asked for %q, but go-get metadata specified %q", path, u.Scheme, repoURL.Scheme) return } - ident = ru.String() + } - var m maybeSource - switch vcs { - case "git": - m = maybeGitSource{url: ru} - case "bzr": - m = maybeBzrSource{url: ru} - case "hg": - m = maybeHgSource{url: ru} - } + switch vcs { + case "git": + pd.mb = maybeGitSource{url: repoURL} + case "bzr": + pd.mb = maybeBzrSource{url: repoURL} + case "hg": + pd.mb = maybeHgSource{url: repoURL} + default: + hmd.deduceErr = fmt.Errorf("unsupported vcs type %s in go-get metadata from %s", vcs, path) + return + } - if m != nil { - src, ident, err = m.try(cachedir, an) - } else { - err = fmt.Errorf("unsupported vcs type %s", vcs) - } - }() + hmd.deduced = pd + // All data is assigned for other goroutines that may be waiting. Now, + // send the pathDeduction back to the deductionCoordinator by calling + // the returnFunc. This will also remove the reference to this hmd in + // the coordinator's trie. 
+ // + // When this call finishes, it is guaranteed the coordinator will have + // at least begun running the action to insert the path deduction, which + // means no other deduction request will be able to interleave and + // request the same path before the pathDeduction can be processed, but + // after this hmd has been dereferenced from the trie. + hmd.returnFunc(pd) + }) - return func() (source, string, error) { - <-c - return src, ident, err - } - } - return deductionFuture{ - rslow: true, - root: root, - psf: src, - }, nil + return hmd.deduced, hmd.deduceErr } func normalizeURI(p string) (u *url.URL, newpath string, err error) { @@ -725,31 +804,41 @@ func normalizeURI(p string) (u *url.URL, newpath string, err error) { } // fetchMetadata fetches the remote metadata for path. -func fetchMetadata(path string) (rc io.ReadCloser, err error) { +func fetchMetadata(ctx context.Context, path, scheme string) (rc io.ReadCloser, err error) { defer func() { if err != nil { err = fmt.Errorf("unable to determine remote metadata protocol: %s", err) } }() - // try https first - rc, err = doFetchMetadata("https", path) + if scheme == "http" { + rc, err = doFetchMetadata(ctx, "http", path) + return + } + + rc, err = doFetchMetadata(ctx, "https", path) if err == nil { return } - rc, err = doFetchMetadata("http", path) + rc, err = doFetchMetadata(ctx, "http", path) return } -func doFetchMetadata(scheme, path string) (io.ReadCloser, error) { +func doFetchMetadata(ctx context.Context, scheme, path string) (io.ReadCloser, error) { url := fmt.Sprintf("%s://%s?go-get=1", scheme, path) switch scheme { case "https", "http": - resp, err := http.Get(url) + req, err := http.NewRequest("GET", url, nil) if err != nil { return nil, fmt.Errorf("failed to access url %q", url) } + + resp, err := http.DefaultClient.Do(req.WithContext(ctx)) + if err != nil { + return nil, fmt.Errorf("failed to access url %q", url) + } + return resp.Body, nil default: return nil, fmt.Errorf("unknown remote 
protocol scheme: %q", scheme) @@ -757,8 +846,12 @@ func doFetchMetadata(scheme, path string) (io.ReadCloser, error) { } // parseMetadata fetches and decodes remote metadata for path. -func parseMetadata(path string) (string, string, string, error) { - rc, err := fetchMetadata(path) +// +// scheme is optional. If it's http, only http will be attempted for fetching. +// Any other scheme (including none) will first try https, then fall back to +// http. +func parseMetadata(ctx context.Context, path, scheme string) (string, string, string, error) { + rc, err := fetchMetadata(ctx, path, scheme) if err != nil { return "", "", "", err } diff --git a/vendor/github.com/sdboyer/gps/deduce_test.go b/vendor/github.com/sdboyer/gps/deduce_test.go index 5044538400..a4c5990e3d 100644 --- a/vendor/github.com/sdboyer/gps/deduce_test.go +++ b/vendor/github.com/sdboyer/gps/deduce_test.go @@ -2,11 +2,11 @@ package gps import ( "bytes" + "context" "errors" "fmt" "net/url" "reflect" - "sync" "testing" ) @@ -483,7 +483,10 @@ var pathDeductionFixtures = map[string][]pathDeductionFixture{ func TestDeduceFromPath(t *testing.T) { for typ, fixtures := range pathDeductionFixtures { + typ, fixtures := typ, fixtures t.Run(typ, func(t *testing.T) { + t.Parallel() + var deducer pathDeducer switch typ { case "github": @@ -533,7 +536,9 @@ func TestDeduceFromPath(t *testing.T) { } for _, fix := range fixtures { + fix := fix t.Run(fix.in, func(t *testing.T) { + t.Parallel() u, in, uerr := normalizeURI(fix.in) if uerr != nil { if fix.rerr == nil { @@ -591,13 +596,14 @@ func TestVanityDeduction(t *testing.T) { defer clean() vanities := pathDeductionFixtures["vanity"] - wg := &sync.WaitGroup{} - wg.Add(len(vanities)) - - for _, fix := range vanities { - go func(fix pathDeductionFixture) { - defer wg.Done() + // group to avoid sourcemanager cleanup + ctx := context.Background() + t.Run("vanity", func(t *testing.T) { + for _, fix := range vanities { + fix := fix t.Run(fmt.Sprintf("%s", 
fix.in), func(t *testing.T) { + t.Parallel() + pr, err := sm.DeduceProjectRoot(fix.in) if err != nil { t.Errorf("Unexpected err on deducing project root: %s", err) @@ -606,27 +612,33 @@ func TestVanityDeduction(t *testing.T) { t.Errorf("Deducer did not return expected root:\n\t(GOT) %s\n\t(WNT) %s", pr, fix.root) } - ft, err := sm.deducePathAndProcess(fix.in) + pd, err := sm.deduceCoord.deduceRootPath(ctx, fix.in) if err != nil { t.Errorf("Unexpected err on deducing source: %s", err) return } - _, ident, err := ft.srcf() - if err != nil { - t.Errorf("Unexpected err on executing source future: %s", err) - return - } - - ustr := fix.mb.(maybeGitSource).url.String() - if ident != ustr { - t.Errorf("Deduced repo ident does not match fixture:\n\t(GOT) %s\n\t(WNT) %s", ident, ustr) + goturl, wanturl := pd.mb.(maybeGitSource).url.String(), fix.mb.(maybeGitSource).url.String() + if goturl != wanturl { + t.Errorf("Deduced repo ident does not match fixture:\n\t(GOT) %s\n\t(WNT) %s", goturl, wanturl) } }) - }(fix) + } + }) +} + +func TestVanityDeductionSchemeMismatch(t *testing.T) { + if testing.Short() { + t.Skip("Skipping slow test in short mode") } - wg.Wait() + ctx := context.Background() + cm := newSupervisor(ctx) + dc := newDeductionCoordinator(cm) + _, err := dc.deduceRootPath(ctx, "ssh://golang.org/exp") + if err == nil { + t.Error("should have errored on scheme mismatch between input and go-get metadata") + } } // borrow from stdlib diff --git a/vendor/github.com/sdboyer/gps/example.go b/vendor/github.com/sdboyer/gps/example.go index dd1225454b..063d93d43b 100644 --- a/vendor/github.com/sdboyer/gps/example.go +++ b/vendor/github.com/sdboyer/gps/example.go @@ -31,16 +31,17 @@ func main() { // Set up params, including tracing params := gps.SolveParameters{ - RootDir: root, - Trace: true, - TraceLogger: log.New(os.Stdout, "", 0), + RootDir: root, + Trace: true, + TraceLogger: log.New(os.Stdout, "", 0), + ProjectAnalyzer: NaiveAnalyzer{}, } // Perform 
static analysis on the current project to find all of its imports. params.RootPackageTree, _ = pkgtree.ListPackages(root, importroot) // Set up a SourceManager. This manages interaction with sources (repositories). tempdir, _ := ioutil.TempDir("", "gps-repocache") - sourcemgr, _ := gps.NewSourceManager(NaiveAnalyzer{}, filepath.Join(tempdir)) + sourcemgr, _ := gps.NewSourceManager(filepath.Join(tempdir)) defer sourcemgr.Release() // Prep and run the solver diff --git a/vendor/github.com/sdboyer/gps/filesystem_test.go b/vendor/github.com/sdboyer/gps/filesystem_test.go new file mode 100644 index 0000000000..2e3513f871 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/filesystem_test.go @@ -0,0 +1,154 @@ +package gps + +import ( + "os" + "path/filepath" + "testing" +) + +// This file contains utilities for running tests around file system state. + +// fspath represents a file system path in an OS-agnostic way. +type fsPath []string + +func (f fsPath) String() string { return filepath.Join(f...) } + +func (f fsPath) prepend(prefix string) fsPath { + p := fsPath{filepath.FromSlash(prefix)} + return append(p, f...) +} + +type fsTestCase struct { + before, after filesystemState +} + +// filesystemState represents the state of a file system. It has a setup method +// which inflates its state to the actual host file system, and an assert +// method which checks that the actual file system matches the described state. 
+type filesystemState struct { + root string + dirs []fsPath + files []fsPath + links []fsLink +} + +// assert makes sure that the fs state matches the state of the actual host +// file system +func (fs filesystemState) assert(t *testing.T) { + dirMap := make(map[string]bool) + fileMap := make(map[string]bool) + linkMap := make(map[string]bool) + + for _, d := range fs.dirs { + dirMap[d.prepend(fs.root).String()] = true + } + for _, f := range fs.files { + fileMap[f.prepend(fs.root).String()] = true + } + for _, l := range fs.links { + linkMap[l.path.prepend(fs.root).String()] = true + } + + err := filepath.Walk(fs.root, func(path string, info os.FileInfo, err error) error { + if err != nil { + t.Errorf("filepath.Walk path=%q err=%q", path, err) + return err + } + + if path == fs.root { + return nil + } + + // Careful! Have to check whether the path is a symlink first because, on + // windows, a symlink to a directory will return 'true' for info.IsDir(). + if (info.Mode() & os.ModeSymlink) != 0 { + if linkMap[path] { + delete(linkMap, path) + } else { + t.Errorf("unexpected symlink exists %q", path) + } + return nil + } + + if info.IsDir() { + if dirMap[path] { + delete(dirMap, path) + } else { + t.Errorf("unexpected directory exists %q", path) + } + return nil + } + + if fileMap[path] { + delete(fileMap, path) + } else { + t.Errorf("unexpected file exists %q", path) + } + return nil + }) + + if err != nil { + t.Errorf("filesystem.Walk err=%q", err) + } + + for d := range dirMap { + t.Errorf("could not find expected directory %q", d) + } + for f := range fileMap { + t.Errorf("could not find expected file %q", f) + } + for l := range linkMap { + t.Errorf("could not find expected symlink %q", l) + } +} + +// fsLink represents a symbolic link. 
+type fsLink struct { + path fsPath + to string +} + +// setup inflates fs onto the actual host file system +func (fs filesystemState) setup(t *testing.T) { + fs.setupDirs(t) + fs.setupFiles(t) + fs.setupLinks(t) +} + +func (fs filesystemState) setupDirs(t *testing.T) { + for _, dir := range fs.dirs { + p := dir.prepend(fs.root) + if err := os.MkdirAll(p.String(), 0777); err != nil { + t.Fatalf("os.MkdirAll(%q, 0777) err=%q", p, err) + } + } +} + +func (fs filesystemState) setupFiles(t *testing.T) { + for _, file := range fs.files { + p := file.prepend(fs.root) + f, err := os.Create(p.String()) + if err != nil { + t.Fatalf("os.Create(%q) err=%q", p, err) + } + if err := f.Close(); err != nil { + t.Fatalf("file %q Close() err=%q", p, err) + } + } +} + +func (fs filesystemState) setupLinks(t *testing.T) { + for _, link := range fs.links { + p := link.path.prepend(fs.root) + + // On Windows, relative symlinks confuse filepath.Walk. This is golang/go + // issue 17540. So, we'll just sigh and do absolute links, assuming they are + // relative to the directory of link.path. 
+ dir := filepath.Dir(p.String()) + to := filepath.Join(dir, link.to) + + if err := os.Symlink(to, p.String()); err != nil { + t.Fatalf("os.Symlink(%q, %q) err=%q", to, p, err) + } + } +} diff --git a/vendor/github.com/sdboyer/gps/glide.lock b/vendor/github.com/sdboyer/gps/glide.lock index 8d45a7837f..34cfa37c67 100644 --- a/vendor/github.com/sdboyer/gps/glide.lock +++ b/vendor/github.com/sdboyer/gps/glide.lock @@ -7,4 +7,6 @@ imports: version: 94ad6eaf8457cf85a68c9b53fa42e9b1b8683783 - name: github.com/Masterminds/vcs version: abd1ea7037d3652ef9833a164b627f49225e1131 +- name: github.com/sdboyer/constext + version: 836a144573533ea4da4e6929c235fd348aed1c80 testImports: [] diff --git a/vendor/github.com/sdboyer/gps/hash.go b/vendor/github.com/sdboyer/gps/hash.go index f6e5d07e23..b2ee8e4663 100644 --- a/vendor/github.com/sdboyer/gps/hash.go +++ b/vendor/github.com/sdboyer/gps/hash.go @@ -62,7 +62,7 @@ func (s *solver) writeHashingInputs(w io.Writer) { for _, pd := range s.rd.getApplicableConstraints() { writeString(string(pd.Ident.ProjectRoot)) writeString(pd.Ident.Source) - writeString(typedConstraintString(pd.Constraint)) + writeString(pd.Constraint.typedString()) } // Write out each discrete import, including those derived from requires. 
@@ -99,12 +99,12 @@ func (s *solver) writeHashingInputs(w io.Writer) { writeString(pc.Ident.Source) } if pc.Constraint != nil { - writeString(typedConstraintString(pc.Constraint)) + writeString(pc.Constraint.typedString()) } } writeString(hhAnalyzer) - an, av := s.b.AnalyzerInfo() + an, av := s.rd.an.Info() writeString(an) writeString(strconv.Itoa(av)) } diff --git a/vendor/github.com/sdboyer/gps/hash_test.go b/vendor/github.com/sdboyer/gps/hash_test.go index 84f3618df1..ad9466eb61 100644 --- a/vendor/github.com/sdboyer/gps/hash_test.go +++ b/vendor/github.com/sdboyer/gps/hash_test.go @@ -16,12 +16,12 @@ func TestHashInputs(t *testing.T) { RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), + ProjectAnalyzer: naiveAnalyzer{}, } s, err := Prepare(params, newdepspecSM(fix.ds, nil)) if err != nil { - t.Errorf("Unexpected error while prepping solver: %s", err) - t.FailNow() + t.Fatalf("Unexpected error while prepping solver: %s", err) } dig := s.HashInputs() @@ -39,7 +39,7 @@ func TestHashInputs(t *testing.T) { hhIgnores, hhOverrides, hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", } for _, v := range elems { @@ -67,12 +67,12 @@ func TestHashInputsReqsIgs(t *testing.T) { RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: rm, + ProjectAnalyzer: naiveAnalyzer{}, } s, err := Prepare(params, newdepspecSM(fix.ds, nil)) if err != nil { - t.Errorf("Unexpected error while prepping solver: %s", err) - t.FailNow() + t.Fatalf("Unexpected error while prepping solver: %s", err) } dig := s.HashInputs() @@ -92,7 +92,7 @@ func TestHashInputsReqsIgs(t *testing.T) { "foo", hhOverrides, hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", } for _, v := range elems { @@ -114,8 +114,7 @@ func TestHashInputsReqsIgs(t *testing.T) { s, err = Prepare(params, newdepspecSM(fix.ds, nil)) if err != nil { - t.Errorf("Unexpected error while prepping solver: %s", err) - t.FailNow() + 
t.Fatalf("Unexpected error while prepping solver: %s", err) } dig = s.HashInputs() @@ -137,7 +136,7 @@ func TestHashInputsReqsIgs(t *testing.T) { "foo", hhOverrides, hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", } for _, v := range elems { @@ -155,8 +154,7 @@ func TestHashInputsReqsIgs(t *testing.T) { s, err = Prepare(params, newdepspecSM(fix.ds, nil)) if err != nil { - t.Errorf("Unexpected error while prepping solver: %s", err) - t.FailNow() + t.Fatalf("Unexpected error while prepping solver: %s", err) } dig = s.HashInputs() @@ -176,7 +174,7 @@ func TestHashInputsReqsIgs(t *testing.T) { hhIgnores, hhOverrides, hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", } for _, v := range elems { @@ -198,6 +196,7 @@ func TestHashInputsOverrides(t *testing.T) { RootDir: string(basefix.ds[0].n), RootPackageTree: basefix.rootTree(), Manifest: rm, + ProjectAnalyzer: naiveAnalyzer{}, } table := []struct { @@ -231,7 +230,7 @@ func TestHashInputsOverrides(t *testing.T) { "c", "car", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -262,7 +261,7 @@ func TestHashInputsOverrides(t *testing.T) { "c", "car", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -292,7 +291,7 @@ func TestHashInputsOverrides(t *testing.T) { "c", "car", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -320,7 +319,7 @@ func TestHashInputsOverrides(t *testing.T) { "c", "car", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -352,7 +351,7 @@ func TestHashInputsOverrides(t *testing.T) { "d", "b-foobranch", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -380,7 +379,7 @@ func TestHashInputsOverrides(t *testing.T) { "d", "b-foobranch", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -410,7 +409,7 @@ func TestHashInputsOverrides(t *testing.T) { "d", "b-foobranch", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -441,7 +440,7 @@ func 
TestHashInputsOverrides(t *testing.T) { "d", "b-foobranch", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -473,7 +472,7 @@ func TestHashInputsOverrides(t *testing.T) { "d", "b-foobranch", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -507,7 +506,7 @@ func TestHashInputsOverrides(t *testing.T) { "d", "b-foobranch", hhAnalyzer, - "depspec-sm-builtin", + "naive-analyzer", "1", }, }, @@ -519,8 +518,7 @@ func TestHashInputsOverrides(t *testing.T) { s, err := Prepare(params, newdepspecSM(basefix.ds, nil)) if err != nil { - t.Errorf("(fix: %q) Unexpected error while prepping solver: %s", fix.name, err) - t.FailNow() + t.Fatalf("(fix: %q) Unexpected error while prepping solver: %s", fix.name, err) } h := sha256.New() diff --git a/vendor/github.com/sdboyer/gps/lock_test.go b/vendor/github.com/sdboyer/gps/lock_test.go index d49fccf22a..b85e0de14b 100644 --- a/vendor/github.com/sdboyer/gps/lock_test.go +++ b/vendor/github.com/sdboyer/gps/lock_test.go @@ -55,6 +55,7 @@ func TestLockedProjectsEq(t *testing.T) { } for k, f := range fix { + k, f := k, f t.Run(k, func(t *testing.T) { if f.shouldeq { if !lps[f.l1].Eq(lps[f.l2]) { @@ -70,7 +71,6 @@ func TestLockedProjectsEq(t *testing.T) { if lps[f.l2].Eq(lps[f.l1]) { t.Error(f.err + (" (reversed)")) } - } }) } diff --git a/vendor/github.com/sdboyer/gps/lockdiff.go b/vendor/github.com/sdboyer/gps/lockdiff.go new file mode 100644 index 0000000000..65a798c5fa --- /dev/null +++ b/vendor/github.com/sdboyer/gps/lockdiff.go @@ -0,0 +1,253 @@ +package gps + +import ( + "encoding/hex" + "fmt" + "sort" + "strings" +) + +// StringDiff represents a modified string value. 
+// * Added: Previous = nil, Current != nil +// * Deleted: Previous != nil, Current = nil +// * Modified: Previous != nil, Current != nil +// * No Change: Previous = Current, or a nil pointer +type StringDiff struct { + Previous string + Current string +} + +func (diff *StringDiff) String() string { + if diff == nil { + return "" + } + + if diff.Previous == "" && diff.Current != "" { + return fmt.Sprintf("+ %s", diff.Current) + } + + if diff.Previous != "" && diff.Current == "" { + return fmt.Sprintf("- %s", diff.Previous) + } + + if diff.Previous != diff.Current { + return fmt.Sprintf("%s -> %s", diff.Previous, diff.Current) + } + + return diff.Current +} + +// LockDiff is the set of differences between an existing lock file and an updated lock file. +// Fields are only populated when there is a difference, otherwise they are empty. +type LockDiff struct { + HashDiff *StringDiff + Add []LockedProjectDiff + Remove []LockedProjectDiff + Modify []LockedProjectDiff +} + +// LockedProjectDiff contains the before and after snapshot of a project reference. +// Fields are only populated when there is a difference, otherwise they are empty. +type LockedProjectDiff struct { + Name ProjectRoot + Source *StringDiff + Version *StringDiff + Branch *StringDiff + Revision *StringDiff + Packages []StringDiff +} + +// DiffLocks compares two locks and identifies the differences between them. +// Returns nil if there are no differences. +func DiffLocks(l1 Lock, l2 Lock) *LockDiff { + // Default nil locks to empty locks, so that we can still generate a diff + if l1 == nil { + l1 = &SimpleLock{} + } + if l2 == nil { + l2 = &SimpleLock{} + } + + p1, p2 := l1.Projects(), l2.Projects() + + // Check if the slices are sorted already. If they are, we can compare + // without copying. Otherwise, we have to copy to avoid altering the + // original input. 
+ sp1, sp2 := lpsorter(p1), lpsorter(p2) + if len(p1) > 1 && !sort.IsSorted(sp1) { + p1 = make([]LockedProject, len(p1)) + copy(p1, l1.Projects()) + sort.Sort(lpsorter(p1)) + } + if len(p2) > 1 && !sort.IsSorted(sp2) { + p2 = make([]LockedProject, len(p2)) + copy(p2, l2.Projects()) + sort.Sort(lpsorter(p2)) + } + + diff := LockDiff{} + + h1 := hex.EncodeToString(l1.InputHash()) + h2 := hex.EncodeToString(l2.InputHash()) + if h1 != h2 { + diff.HashDiff = &StringDiff{Previous: h1, Current: h2} + } + + var i2next int + for i1 := 0; i1 < len(p1); i1++ { + lp1 := p1[i1] + pr1 := lp1.pi.ProjectRoot + + var matched bool + for i2 := i2next; i2 < len(p2); i2++ { + lp2 := p2[i2] + pr2 := lp2.pi.ProjectRoot + + switch strings.Compare(string(pr1), string(pr2)) { + case 0: // Found a matching project + matched = true + pdiff := DiffProjects(lp1, lp2) + if pdiff != nil { + diff.Modify = append(diff.Modify, *pdiff) + } + i2next = i2 + 1 // Don't evaluate to this again + case +1: // Found a new project + add := buildLockedProjectDiff(lp2) + diff.Add = append(diff.Add, add) + i2next = i2 + 1 // Don't evaluate to this again + continue // Keep looking for a matching project + case -1: // Project has been removed, handled below + break + } + + break // Done evaluating this project, move onto the next + } + + if !matched { + remove := buildLockedProjectDiff(lp1) + diff.Remove = append(diff.Remove, remove) + } + } + + // Anything that still hasn't been evaluated are adds + for i2 := i2next; i2 < len(p2); i2++ { + lp2 := p2[i2] + add := buildLockedProjectDiff(lp2) + diff.Add = append(diff.Add, add) + } + + if diff.HashDiff == nil && len(diff.Add) == 0 && len(diff.Remove) == 0 && len(diff.Modify) == 0 { + return nil // The locks are the equivalent + } + return &diff +} + +func buildLockedProjectDiff(lp LockedProject) LockedProjectDiff { + s2 := lp.pi.Source + r2, b2, v2 := VersionComponentStrings(lp.Version()) + + var rev, version, branch, source *StringDiff + if s2 != "" { + source = 
&StringDiff{Previous: s2, Current: s2} + } + if r2 != "" { + rev = &StringDiff{Previous: r2, Current: r2} + } + if b2 != "" { + branch = &StringDiff{Previous: b2, Current: b2} + } + if v2 != "" { + version = &StringDiff{Previous: v2, Current: v2} + } + + add := LockedProjectDiff{ + Name: lp.pi.ProjectRoot, + Source: source, + Revision: rev, + Version: version, + Branch: branch, + Packages: make([]StringDiff, len(lp.Packages())), + } + for i, pkg := range lp.Packages() { + add.Packages[i] = StringDiff{Previous: pkg, Current: pkg} + } + return add +} + +// DiffProjects compares two projects and identifies the differences between them. +// Returns nil if there are no differences +func DiffProjects(lp1 LockedProject, lp2 LockedProject) *LockedProjectDiff { + diff := LockedProjectDiff{Name: lp1.pi.ProjectRoot} + + s1 := lp1.pi.Source + s2 := lp2.pi.Source + if s1 != s2 { + diff.Source = &StringDiff{Previous: s1, Current: s2} + } + + r1, b1, v1 := VersionComponentStrings(lp1.Version()) + r2, b2, v2 := VersionComponentStrings(lp2.Version()) + if r1 != r2 { + diff.Revision = &StringDiff{Previous: r1, Current: r2} + } + if b1 != b2 { + diff.Branch = &StringDiff{Previous: b1, Current: b2} + } + if v1 != v2 { + diff.Version = &StringDiff{Previous: v1, Current: v2} + } + + p1 := lp1.Packages() + p2 := lp2.Packages() + if !sort.StringsAreSorted(p1) { + p1 = make([]string, len(p1)) + copy(p1, lp1.Packages()) + sort.Strings(p1) + } + if !sort.StringsAreSorted(p2) { + p2 = make([]string, len(p2)) + copy(p2, lp2.Packages()) + sort.Strings(p2) + } + + var i2next int + for i1 := 0; i1 < len(p1); i1++ { + pkg1 := p1[i1] + + var matched bool + for i2 := i2next; i2 < len(p2); i2++ { + pkg2 := p2[i2] + + switch strings.Compare(pkg1, pkg2) { + case 0: // Found matching package + matched = true + i2next = i2 + 1 // Don't evaluate to this again + case +1: // Found a new package + add := StringDiff{Current: pkg2} + diff.Packages = append(diff.Packages, add) + i2next = i2 + 1 // Don't 
evaluate to this again + continue // Keep looking for a match + case -1: // Package has been removed (handled below) + break + } + + break // Done evaluating this package, move onto the next + } + + if !matched { + diff.Packages = append(diff.Packages, StringDiff{Previous: pkg1}) + } + } + + // Anything that still hasn't been evaluated are adds + for i2 := i2next; i2 < len(p2); i2++ { + pkg2 := p2[i2] + add := StringDiff{Current: pkg2} + diff.Packages = append(diff.Packages, add) + } + + if diff.Source == nil && diff.Version == nil && diff.Revision == nil && len(diff.Packages) == 0 { + return nil // The projects are equivalent + } + return &diff +} diff --git a/vendor/github.com/sdboyer/gps/lockdiff_test.go b/vendor/github.com/sdboyer/gps/lockdiff_test.go new file mode 100644 index 0000000000..87a40c394f --- /dev/null +++ b/vendor/github.com/sdboyer/gps/lockdiff_test.go @@ -0,0 +1,497 @@ +package gps + +import ( + "bytes" + "encoding/hex" + "testing" +) + +func TestStringDiff_NoChange(t *testing.T) { + diff := StringDiff{Previous: "foo", Current: "foo"} + want := "foo" + got := diff.String() + if got != want { + t.Fatalf("Expected '%s', got '%s'", want, got) + } +} + +func TestStringDiff_Add(t *testing.T) { + diff := StringDiff{Current: "foo"} + got := diff.String() + if got != "+ foo" { + t.Fatalf("Expected '+ foo', got '%s'", got) + } +} + +func TestStringDiff_Remove(t *testing.T) { + diff := StringDiff{Previous: "foo"} + want := "- foo" + got := diff.String() + if got != want { + t.Fatalf("Expected '%s', got '%s'", want, got) + } +} + +func TestStringDiff_Modify(t *testing.T) { + diff := StringDiff{Previous: "foo", Current: "bar"} + want := "foo -> bar" + got := diff.String() + if got != want { + t.Fatalf("Expected '%s', got '%s'", want, got) + } +} + +func TestDiffProjects_NoChange(t *testing.T) { + p1 := NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}) + p2 := 
NewLockedProject(mkPI("github.com/sdboyer/gps"), NewVersion("v0.10.0"), []string{"gps"}) + + diff := DiffProjects(p1, p2) + if diff != nil { + t.Fatal("Expected the diff to be nil") + } +} + +func TestDiffProjects_Modify(t *testing.T) { + p1 := LockedProject{ + pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, + v: NewBranch("master"), + r: "abc123", + pkgs: []string{"baz", "qux"}, + } + + p2 := LockedProject{ + pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar", Source: "https://github.com/mcfork/gps.git"}, + v: NewVersion("v1.0.0"), + r: "def456", + pkgs: []string{"baz", "derp"}, + } + + diff := DiffProjects(p1, p2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + wantSource := "+ https://github.com/mcfork/gps.git" + gotSource := diff.Source.String() + if gotSource != wantSource { + t.Fatalf("Expected diff.Source to be '%s', got '%s'", wantSource, diff.Source) + } + + wantVersion := "+ v1.0.0" + gotVersion := diff.Version.String() + if gotVersion != wantVersion { + t.Fatalf("Expected diff.Version to be '%s', got '%s'", wantVersion, gotVersion) + } + + wantRevision := "abc123 -> def456" + gotRevision := diff.Revision.String() + if gotRevision != wantRevision { + t.Fatalf("Expected diff.Revision to be '%s', got '%s'", wantRevision, gotRevision) + } + + wantBranch := "- master" + gotBranch := diff.Branch.String() + if gotBranch != wantBranch { + t.Fatalf("Expected diff.Branch to be '%s', got '%s'", wantBranch, gotBranch) + } + + fmtPkgs := func(pkgs []StringDiff) string { + b := bytes.NewBufferString("[") + for _, pkg := range pkgs { + b.WriteString(pkg.String()) + b.WriteString(",") + } + b.WriteString("]") + return b.String() + } + + wantPackages := "[+ derp,- qux,]" + gotPackages := fmtPkgs(diff.Packages) + if gotPackages != wantPackages { + t.Fatalf("Expected diff.Packages to be '%s', got '%s'", wantPackages, gotPackages) + } +} + +func TestDiffProjects_AddPackages(t *testing.T) { + p1 := 
LockedProject{ + pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, + v: NewBranch("master"), + r: "abc123", + pkgs: []string{"foobar"}, + } + + p2 := LockedProject{ + pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar", Source: "https://github.com/mcfork/gps.git"}, + v: NewVersion("v1.0.0"), + r: "def456", + pkgs: []string{"bazqux", "foobar", "zugzug"}, + } + + diff := DiffProjects(p1, p2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + if len(diff.Packages) != 2 { + t.Fatalf("Expected diff.Packages to have 2 packages, got %d", len(diff.Packages)) + } + + want0 := "+ bazqux" + got0 := diff.Packages[0].String() + if got0 != want0 { + t.Fatalf("Expected diff.Packages[0] to contain %s, got %s", want0, got0) + } + + want1 := "+ zugzug" + got1 := diff.Packages[1].String() + if got1 != want1 { + t.Fatalf("Expected diff.Packages[1] to contain %s, got %s", want1, got1) + } +} + +func TestDiffProjects_RemovePackages(t *testing.T) { + p1 := LockedProject{ + pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, + v: NewBranch("master"), + r: "abc123", + pkgs: []string{"athing", "foobar"}, + } + + p2 := LockedProject{ + pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar", Source: "https://github.com/mcfork/gps.git"}, + v: NewVersion("v1.0.0"), + r: "def456", + pkgs: []string{"bazqux"}, + } + + diff := DiffProjects(p1, p2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + if len(diff.Packages) != 3 { + t.Fatalf("Expected diff.Packages to have 3 packages, got %d", len(diff.Packages)) + } + + want0 := "- athing" + got0 := diff.Packages[0].String() + if got0 != want0 { + t.Fatalf("Expected diff.Packages[0] to contain %s, got %s", want0, got0) + } + + // diff.Packages[1] is '+ bazqux' + + want2 := "- foobar" + got2 := diff.Packages[2].String() + if got2 != want2 { + t.Fatalf("Expected diff.Packages[2] to contain %s, got %s", want2, got2) + } +} + +func TestDiffLocks_NoChange(t 
*testing.T) { + l1 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + l2 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(l1, l2) + if diff != nil { + t.Fatal("Expected the diff to be nil") + } +} + +func TestDiffLocks_AddProjects(t *testing.T) { + l1 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + l2 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + { + pi: ProjectIdentifier{ProjectRoot: "github.com/baz/qux", Source: "https://github.com/mcfork/bazqux.git"}, + v: NewVersion("v0.5.0"), + r: "def456", + pkgs: []string{"p1", "p2"}, + }, + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + {pi: ProjectIdentifier{ProjectRoot: "github.com/zug/zug"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(l1, l2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + if len(diff.Add) != 2 { + t.Fatalf("Expected diff.Add to have 2 projects, got %d", len(diff.Add)) + } + + want0 := "github.com/baz/qux" + got0 := string(diff.Add[0].Name) + if got0 != want0 { + t.Fatalf("Expected diff.Add[0] to contain %s, got %s", want0, got0) + } + + want1 := "github.com/zug/zug" + got1 := string(diff.Add[1].Name) + if got1 != want1 { + t.Fatalf("Expected diff.Add[1] to contain %s, got %s", want1, got1) + } + + add0 := diff.Add[0] + wantSource := "https://github.com/mcfork/bazqux.git" + gotSource := add0.Source.String() + if gotSource != wantSource { + t.Fatalf("Expected diff.Add[0].Source to be '%s', got '%s'", wantSource, add0.Source) + } + + wantVersion := "v0.5.0" + gotVersion := add0.Version.String() + if gotVersion != wantVersion { + 
t.Fatalf("Expected diff.Add[0].Version to be '%s', got '%s'", wantVersion, gotVersion) + } + + wantRevision := "def456" + gotRevision := add0.Revision.String() + if gotRevision != wantRevision { + t.Fatalf("Expected diff.Add[0].Revision to be '%s', got '%s'", wantRevision, gotRevision) + } + + wantBranch := "" + gotBranch := add0.Branch.String() + if gotBranch != wantBranch { + t.Fatalf("Expected diff.Add[0].Branch to be '%s', got '%s'", wantBranch, gotBranch) + } + + fmtPkgs := func(pkgs []StringDiff) string { + b := bytes.NewBufferString("[") + for _, pkg := range pkgs { + b.WriteString(pkg.String()) + b.WriteString(",") + } + b.WriteString("]") + return b.String() + } + + wantPackages := "[p1,p2,]" + gotPackages := fmtPkgs(add0.Packages) + if gotPackages != wantPackages { + t.Fatalf("Expected diff.Add[0].Packages to be '%s', got '%s'", wantPackages, gotPackages) + } +} + +func TestDiffLocks_RemoveProjects(t *testing.T) { + l1 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + { + pi: ProjectIdentifier{ProjectRoot: "github.com/a/thing", Source: "https://github.com/mcfork/athing.git"}, + v: NewBranch("master"), + r: "def456", + pkgs: []string{"p1", "p2"}, + }, + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + l2 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/baz/qux"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(l1, l2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + if len(diff.Remove) != 2 { + t.Fatalf("Expected diff.Remove to have 2 projects, got %d", len(diff.Remove)) + } + + want0 := "github.com/a/thing" + got0 := string(diff.Remove[0].Name) + if got0 != want0 { + t.Fatalf("Expected diff.Remove[0] to contain %s, got %s", want0, got0) + } + + want1 := "github.com/foo/bar" + got1 := string(diff.Remove[1].Name) + if got1 != want1 { + t.Fatalf("Expected diff.Remove[1] to contain 
%s, got %s", want1, got1) + } + + remove0 := diff.Remove[0] + wantSource := "https://github.com/mcfork/athing.git" + gotSource := remove0.Source.String() + if gotSource != wantSource { + t.Fatalf("Expected diff.Remove[0].Source to be '%s', got '%s'", wantSource, remove0.Source) + } + + wantVersion := "" + gotVersion := remove0.Version.String() + if gotVersion != wantVersion { + t.Fatalf("Expected diff.Remove[0].Version to be '%s', got '%s'", wantVersion, gotVersion) + } + + wantRevision := "def456" + gotRevision := remove0.Revision.String() + if gotRevision != wantRevision { + t.Fatalf("Expected diff.Remove[0].Revision to be '%s', got '%s'", wantRevision, gotRevision) + } + + wantBranch := "master" + gotBranch := remove0.Branch.String() + if gotBranch != wantBranch { + t.Fatalf("Expected diff.Remove[0].Branch to be '%s', got '%s'", wantBranch, gotBranch) + } + + fmtPkgs := func(pkgs []StringDiff) string { + b := bytes.NewBufferString("[") + for _, pkg := range pkgs { + b.WriteString(pkg.String()) + b.WriteString(",") + } + b.WriteString("]") + return b.String() + } + + wantPackages := "[p1,p2,]" + gotPackages := fmtPkgs(remove0.Packages) + if gotPackages != wantPackages { + t.Fatalf("Expected diff.Remove[0].Packages to be '%s', got '%s'", wantPackages, gotPackages) + } +} + +func TestDiffLocks_ModifyProjects(t *testing.T) { + l1 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bu"}, v: NewVersion("v1.0.0")}, + {pi: ProjectIdentifier{ProjectRoot: "github.com/zig/zag"}, v: NewVersion("v1.0.0")}, + }, + } + l2 := safeLock{ + h: []byte("abc123"), + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/baz/qux"}, v: NewVersion("v1.0.0")}, + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v2.0.0")}, + {pi: ProjectIdentifier{ProjectRoot: 
"github.com/zig/zag"}, v: NewVersion("v2.0.0")}, + {pi: ProjectIdentifier{ProjectRoot: "github.com/zug/zug"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(l1, l2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + if len(diff.Modify) != 2 { + t.Fatalf("Expected diff.Remove to have 2 projects, got %d", len(diff.Remove)) + } + + want0 := "github.com/foo/bar" + got0 := string(diff.Modify[0].Name) + if got0 != want0 { + t.Fatalf("Expected diff.Modify[0] to contain %s, got %s", want0, got0) + } + + want1 := "github.com/zig/zag" + got1 := string(diff.Modify[1].Name) + if got1 != want1 { + t.Fatalf("Expected diff.Modify[1] to contain %s, got %s", want1, got1) + } +} + +func TestDiffLocks_ModifyHash(t *testing.T) { + h1, _ := hex.DecodeString("abc123") + l1 := safeLock{ + h: h1, + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + + h2, _ := hex.DecodeString("def456") + l2 := safeLock{ + h: h2, + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(l1, l2) + if diff == nil { + t.Fatal("Expected the diff to be populated") + } + + want := "abc123 -> def456" + got := diff.HashDiff.String() + if got != want { + t.Fatalf("Expected diff.HashDiff to be '%s', got '%s'", want, got) + } +} + +func TestDiffLocks_EmptyInitialLock(t *testing.T) { + h2, _ := hex.DecodeString("abc123") + l2 := safeLock{ + h: h2, + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(nil, l2) + + wantHash := "+ abc123" + gotHash := diff.HashDiff.String() + if gotHash != wantHash { + t.Fatalf("Expected diff.HashDiff to be '%s', got '%s'", wantHash, gotHash) + } + + if len(diff.Add) != 1 { + t.Fatalf("Expected diff.Add to contain 1 project, got %d", len(diff.Add)) + } +} + +func 
TestDiffLocks_EmptyFinalLock(t *testing.T) { + h1, _ := hex.DecodeString("abc123") + l1 := safeLock{ + h: h1, + p: []LockedProject{ + {pi: ProjectIdentifier{ProjectRoot: "github.com/foo/bar"}, v: NewVersion("v1.0.0")}, + }, + } + + diff := DiffLocks(l1, nil) + + wantHash := "- abc123" + gotHash := diff.HashDiff.String() + if gotHash != wantHash { + t.Fatalf("Expected diff.HashDiff to be '%s', got '%s'", wantHash, gotHash) + } + + if len(diff.Remove) != 1 { + t.Fatalf("Expected diff.Remove to contain 1 project, got %d", len(diff.Remove)) + } +} + +func TestDiffLocks_EmptyLocks(t *testing.T) { + diff := DiffLocks(nil, nil) + if diff != nil { + t.Fatal("Expected the diff to be empty") + } +} diff --git a/vendor/github.com/sdboyer/gps/manager_test.go b/vendor/github.com/sdboyer/gps/manager_test.go index db566620ca..40989ea413 100644 --- a/vendor/github.com/sdboyer/gps/manager_test.go +++ b/vendor/github.com/sdboyer/gps/manager_test.go @@ -1,6 +1,7 @@ package gps import ( + "context" "fmt" "io/ioutil" "os" @@ -42,14 +43,12 @@ func sv(s string) *semver.Version { func mkNaiveSM(t *testing.T) (*SourceMgr, func()) { cpath, err := ioutil.TempDir("", "smcache") if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - t.FailNow() + t.Fatalf("Failed to create temp dir: %s", err) } - sm, err := NewSourceManager(naiveAnalyzer{}, cpath) + sm, err := NewSourceManager(cpath) if err != nil { - t.Errorf("Unexpected error on SourceManager creation: %s", err) - t.FailNow() + t.Fatalf("Unexpected error on SourceManager creation: %s", err) } return sm, func() { @@ -65,10 +64,9 @@ func remakeNaiveSM(osm *SourceMgr, t *testing.T) (*SourceMgr, func()) { cpath := osm.cachedir osm.Release() - sm, err := NewSourceManager(naiveAnalyzer{}, cpath) + sm, err := NewSourceManager(cpath) if err != nil { - t.Errorf("unexpected error on SourceManager recreation: %s", err) - t.FailNow() + t.Fatalf("unexpected error on SourceManager recreation: %s", err) } return sm, 
func() { @@ -90,13 +88,13 @@ func TestSourceManagerInit(t *testing.T) { if err != nil { t.Errorf("Failed to create temp dir: %s", err) } - sm, err := NewSourceManager(naiveAnalyzer{}, cpath) + sm, err := NewSourceManager(cpath) if err != nil { t.Errorf("Unexpected error on SourceManager creation: %s", err) } - _, err = NewSourceManager(naiveAnalyzer{}, cpath) + _, err = NewSourceManager(cpath) if err == nil { t.Errorf("Creating second SourceManager should have failed due to file lock contention") } else if te, ok := err.(CouldNotCreateLockError); !ok { @@ -114,12 +112,11 @@ func TestSourceManagerInit(t *testing.T) { } if _, err = os.Stat(path.Join(cpath, "sm.lock")); !os.IsNotExist(err) { - t.Errorf("Global cache lock file not cleared correctly on Release()") - t.FailNow() + t.Fatalf("Global cache lock file not cleared correctly on Release()") } // Set another one up at the same spot now, just to be sure - sm, err = NewSourceManager(naiveAnalyzer{}, cpath) + sm, err = NewSourceManager(cpath) if err != nil { t.Errorf("Creating a second SourceManager should have succeeded when the first was released, but failed with err %s", err) } @@ -139,14 +136,12 @@ func TestSourceInit(t *testing.T) { cpath, err := ioutil.TempDir("", "smcache") if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - t.FailNow() + t.Fatalf("Failed to create temp dir: %s", err) } - sm, err := NewSourceManager(naiveAnalyzer{}, cpath) + sm, err := NewSourceManager(cpath) if err != nil { - t.Errorf("Unexpected error on SourceManager creation: %s", err) - t.FailNow() + t.Fatalf("Unexpected error on SourceManager creation: %s", err) } defer func() { @@ -158,15 +153,15 @@ func TestSourceInit(t *testing.T) { }() id := mkPI("github.com/sdboyer/gpkt").normalize() - v, err := sm.ListVersions(id) + pvl, err := sm.ListVersions(id) if err != nil { t.Errorf("Unexpected error during initial project setup/fetching %s", err) } - if len(v) != 7 { - t.Errorf("Expected seven version results from the 
test repo, got %v", len(v)) + if len(pvl) != 7 { + t.Errorf("Expected seven version results from the test repo, got %v", len(pvl)) } else { - expected := []Version{ + expected := []PairedVersion{ NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), @@ -178,11 +173,11 @@ func TestSourceInit(t *testing.T) { // SourceManager itself doesn't guarantee ordering; sort them here so we // can dependably check output - SortForUpgrade(v) + SortPairedForUpgrade(pvl) for k, e := range expected { - if !v[k].Matches(e) { - t.Errorf("Expected version %s in position %v but got %s", e, k, v[k]) + if !pvl[k].Matches(e) { + t.Errorf("Expected version %s in position %v but got %s", e, k, pvl[k]) } } } @@ -196,13 +191,13 @@ func TestSourceInit(t *testing.T) { s: &solver{mtr: newMetrics()}, } - v, err = smc.ListVersions(id) + vl, err := smc.listVersions(id) if err != nil { t.Errorf("Unexpected error during initial project setup/fetching %s", err) } - if len(v) != 7 { - t.Errorf("Expected seven version results from the test repo, got %v", len(v)) + if len(vl) != 7 { + t.Errorf("Expected seven version results from the test repo, got %v", len(vl)) } else { expected := []Version{ NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), @@ -215,21 +210,21 @@ func TestSourceInit(t *testing.T) { } for k, e := range expected { - if !v[k].Matches(e) { - t.Errorf("Expected version %s in position %v but got %s", e, k, v[k]) + if !vl[k].Matches(e) { + t.Errorf("Expected version %s in position %v but got %s", e, k, vl[k]) } } - if !v[3].(versionPair).v.(branchVersion).isDefault { + if !vl[3].(versionPair).v.(branchVersion).isDefault { t.Error("Expected master branch version to have isDefault flag, but it did not") } - if v[4].(versionPair).v.(branchVersion).isDefault { + if 
vl[4].(versionPair).v.(branchVersion).isDefault { t.Error("Expected v1 branch version not to have isDefault flag, but it did") } - if v[5].(versionPair).v.(branchVersion).isDefault { + if vl[5].(versionPair).v.(branchVersion).isDefault { t.Error("Expected v1.1 branch version not to have isDefault flag, but it did") } - if v[6].(versionPair).v.(branchVersion).isDefault { + if vl[6].(versionPair).v.(branchVersion).isDefault { t.Error("Expected v3 branch version not to have isDefault flag, but it did") } } @@ -289,13 +284,13 @@ func TestDefaultBranchAssignment(t *testing.T) { } else { brev := Revision("fda020843ac81352004b9dca3fcccdd517600149") mrev := Revision("9f9c3a591773d9b28128309ac7a9a72abcab267d") - expected := []Version{ + expected := []PairedVersion{ NewBranch("branchone").Is(brev), NewBranch("otherbranch").Is(brev), NewBranch("master").Is(mrev), } - SortForUpgrade(v) + SortPairedForUpgrade(v) for k, e := range expected { if !v[k].Matches(e) { @@ -337,7 +332,7 @@ func TestMgrMethodsFailWithBadPath(t *testing.T) { if _, err = sm.ListPackages(bad, nil); err == nil { t.Error("ListPackages() did not error on bad input") } - if _, _, err = sm.GetManifestAndLock(bad, nil); err == nil { + if _, _, err = sm.GetManifestAndLock(bad, nil, naiveAnalyzer{}); err == nil { t.Error("GetManifestAndLock() did not error on bad input") } if err = sm.ExportProject(bad, nil, ""); err == nil { @@ -360,53 +355,53 @@ func TestGetSources(t *testing.T) { mkPI("launchpad.net/govcstestbzrrepo").normalize(), } - wg := &sync.WaitGroup{} - wg.Add(3) - for _, pi := range pil { - go func(lpi ProjectIdentifier) { - defer wg.Done() - - nn := lpi.normalizedSource() - src, err := sm.getSourceFor(lpi) - if err != nil { - t.Errorf("(src %q) unexpected error setting up source: %s", nn, err) - return - } + ctx := context.Background() + // protects against premature release of sm + t.Run("inner", func(t *testing.T) { + for _, pi := range pil { + lpi := pi + t.Run(lpi.normalizedSource(), func(t 
*testing.T) { + t.Parallel() - // Re-get the same, make sure they are the same - src2, err := sm.getSourceFor(lpi) - if err != nil { - t.Errorf("(src %q) unexpected error re-getting source: %s", nn, err) - } else if src != src2 { - t.Errorf("(src %q) first and second sources are not eq", nn) - } + srcg, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) + if err != nil { + t.Errorf("unexpected error setting up source: %s", err) + return + } - // All of them _should_ select https, so this should work - lpi.Source = "https://" + lpi.Source - src3, err := sm.getSourceFor(lpi) - if err != nil { - t.Errorf("(src %q) unexpected error getting explicit https source: %s", nn, err) - } else if src != src3 { - t.Errorf("(src %q) explicit https source should reuse autodetected https source", nn) - } + // Re-get the same, make sure they are the same + srcg2, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) + if err != nil { + t.Errorf("unexpected error re-getting source: %s", err) + } else if srcg != srcg2 { + t.Error("first and second sources are not eq") + } - // Now put in http, and they should differ - lpi.Source = "http://" + string(lpi.ProjectRoot) - src4, err := sm.getSourceFor(lpi) - if err != nil { - t.Errorf("(src %q) unexpected error getting explicit http source: %s", nn, err) - } else if src == src4 { - t.Errorf("(src %q) explicit http source should create a new src", nn) - } - }(pi) - } + // All of them _should_ select https, so this should work + lpi.Source = "https://" + lpi.Source + srcg3, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) + if err != nil { + t.Errorf("unexpected error getting explicit https source: %s", err) + } else if srcg != srcg3 { + t.Error("explicit https source should reuse autodetected https source") + } - wg.Wait() + // Now put in http, and they should differ + lpi.Source = "http://" + string(lpi.ProjectRoot) + srcg4, err := sm.srcCoord.getSourceGatewayFor(ctx, lpi) + if err != nil { + t.Errorf("unexpected error getting explicit http 
source: %s", err) + } else if srcg == srcg4 { + t.Error("explicit http source should create a new src") + } + }) + } + }) // nine entries (of which three are dupes): for each vcs, raw import path, // the https url, and the http url - if len(sm.srcs) != 9 { - t.Errorf("Should have nine discrete entries in the srcs map, got %v", len(sm.srcs)) + if len(sm.srcCoord.nameToURL) != 9 { + t.Errorf("Should have nine discrete entries in the nameToURL map, got %v", len(sm.srcCoord.nameToURL)) } clean() } @@ -425,7 +420,7 @@ func TestGetInfoListVersionsOrdering(t *testing.T) { id := mkPI("github.com/sdboyer/gpkt").normalize() - _, _, err := sm.GetManifestAndLock(id, NewVersion("v1.0.0")) + _, _, err := sm.GetManifestAndLock(id, NewVersion("v1.0.0"), naiveAnalyzer{}) if err != nil { t.Errorf("Unexpected error from GetInfoAt %s", err) } @@ -452,8 +447,8 @@ func TestDeduceProjectRoot(t *testing.T) { if string(pr) != in { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } - if sm.rootxt.Len() != 1 { - t.Errorf("Root path trie should have one element after one deduction, has %v", sm.rootxt.Len()) + if sm.deduceCoord.rootxt.Len() != 1 { + t.Errorf("Root path trie should have one element after one deduction, has %v", sm.deduceCoord.rootxt.Len()) } pr, err = sm.DeduceProjectRoot(in) @@ -462,8 +457,8 @@ func TestDeduceProjectRoot(t *testing.T) { } else if string(pr) != in { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } - if sm.rootxt.Len() != 1 { - t.Errorf("Root path trie should still have one element after performing the same deduction twice; has %v", sm.rootxt.Len()) + if sm.deduceCoord.rootxt.Len() != 1 { + t.Errorf("Root path trie should still have one element after performing the same deduction twice; has %v", sm.deduceCoord.rootxt.Len()) } // Now do a subpath @@ -474,8 +469,8 @@ func TestDeduceProjectRoot(t *testing.T) { } else if string(pr) != in { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", 
pr, in) } - if sm.rootxt.Len() != 2 { - t.Errorf("Root path trie should have two elements, one for root and one for subpath; has %v", sm.rootxt.Len()) + if sm.deduceCoord.rootxt.Len() != 1 { + t.Errorf("Root path trie should still have one element, as still only one unique root has gone in; has %v", sm.deduceCoord.rootxt.Len()) } // Now do a fully different root, but still on github @@ -487,8 +482,8 @@ func TestDeduceProjectRoot(t *testing.T) { } else if string(pr) != in2 { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } - if sm.rootxt.Len() != 4 { - t.Errorf("Root path trie should have four elements, one for each unique root and subpath; has %v", sm.rootxt.Len()) + if sm.deduceCoord.rootxt.Len() != 2 { + t.Errorf("Root path trie should have two elements, one for each unique root; has %v", sm.deduceCoord.rootxt.Len()) } // Ensure that our prefixes are bounded by path separators @@ -499,8 +494,8 @@ func TestDeduceProjectRoot(t *testing.T) { } else if string(pr) != in4 { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } - if sm.rootxt.Len() != 5 { - t.Errorf("Root path trie should have five elements, one for each unique root and subpath; has %v", sm.rootxt.Len()) + if sm.deduceCoord.rootxt.Len() != 3 { + t.Errorf("Root path trie should have three elements, one for each unique root; has %v", sm.deduceCoord.rootxt.Len()) } // Ensure that vcs extension-based matching comes through @@ -511,84 +506,8 @@ func TestDeduceProjectRoot(t *testing.T) { } else if string(pr) != in5 { t.Errorf("Wrong project root was deduced;\n\t(GOT) %s\n\t(WNT) %s", pr, in) } - if sm.rootxt.Len() != 6 { - t.Errorf("Root path trie should have six elements, one for each unique root and subpath; has %v", sm.rootxt.Len()) - } -} - -// Test that the deduction performed in SourceMgr.deducePathAndProcess() is safe -// for parallel execution - in particular, that parallel calls to the same -// resource fold in together as expected. 
-// -// Obviously, this is just a heuristic; while failure means something's -// definitely broken, success does not guarantee correctness. -func TestMultiDeduceThreadsafe(t *testing.T) { - sm, clean := mkNaiveSM(t) - defer clean() - - in := "github.com/sdboyer/gps" - ft, err := sm.deducePathAndProcess(in) - if err != nil { - t.Errorf("Known-good path %q had unexpected basic deduction error: %s", in, err) - t.FailNow() - } - - cnum := 50 - wg := &sync.WaitGroup{} - - // Set up channel for everything else to block on - c := make(chan struct{}, 1) - f := func(rnum int) { - defer func() { - wg.Done() - if e := recover(); e != nil { - t.Errorf("goroutine number %v panicked with err: %s", rnum, e) - } - }() - <-c - _, err := ft.rootf() - if err != nil { - t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err) - } - } - - for k := range make([]struct{}, cnum) { - wg.Add(1) - go f(k) - runtime.Gosched() - } - close(c) - wg.Wait() - if sm.rootxt.Len() != 1 { - t.Errorf("Root path trie should have just one element; has %v", sm.rootxt.Len()) - } - - // repeat for srcf - wg2 := &sync.WaitGroup{} - c = make(chan struct{}, 1) - f = func(rnum int) { - defer func() { - wg2.Done() - if e := recover(); e != nil { - t.Errorf("goroutine number %v panicked with err: %s", rnum, e) - } - }() - <-c - _, _, err := ft.srcf() - if err != nil { - t.Errorf("err was non-nil on root detection in goroutine number %v: %s", rnum, err) - } - } - - for k := range make([]struct{}, cnum) { - wg2.Add(1) - go f(k) - runtime.Gosched() - } - close(c) - wg2.Wait() - if len(sm.srcs) != 2 { - t.Errorf("Sources map should have just two elements, but has %v", len(sm.srcs)) + if sm.deduceCoord.rootxt.Len() != 4 { + t.Errorf("Root path trie should have four elements, one for each unique root; has %v", sm.deduceCoord.rootxt.Len()) } } @@ -598,11 +517,13 @@ func TestMultiFetchThreadsafe(t *testing.T) { t.Skip("Skipping slow test in short mode") } - t.Skip("UGH: this is 
demonstrating real concurrency problems; skipping until we've fixed them") - projects := []ProjectIdentifier{ mkPI("github.com/sdboyer/gps"), mkPI("github.com/sdboyer/gpkt"), + ProjectIdentifier{ + ProjectRoot: ProjectRoot("github.com/sdboyer/gpkt"), + Source: "https://github.com/sdboyer/gpkt", + }, mkPI("github.com/sdboyer/gogl"), mkPI("github.com/sdboyer/gliph"), mkPI("github.com/sdboyer/frozone"), @@ -617,62 +538,77 @@ func TestMultiFetchThreadsafe(t *testing.T) { //mkPI("bitbucket.org/sdboyer/nobm"), } - // 40 gives us ten calls per op, per project, which should be(?) decently - // likely to reveal underlying parallelism problems + do := func(name string, sm *SourceMgr) { + t.Run(name, func(t *testing.T) { + // This gives us ten calls per op, per project, which should be(?) + // decently likely to reveal underlying concurrency problems + ops := 4 + cnum := len(projects) * ops * 10 - do := func(sm *SourceMgr) { - wg := &sync.WaitGroup{} - cnum := len(projects) * 40 + for i := 0; i < cnum; i++ { + // Trigger all four ops on each project, then move on to the next + // project. 
+ id, op := projects[(i/ops)%len(projects)], i%ops + // The count of times this op has been been invoked on this project + // (after the upcoming invocation) + opcount := i/(ops*len(projects)) + 1 - for i := 0; i < cnum; i++ { - wg.Add(1) - - go func(id ProjectIdentifier, pass int) { - switch pass { + switch op { case 0: - t.Logf("Deducing root for %s", id.errString()) - _, err := sm.DeduceProjectRoot(string(id.ProjectRoot)) - if err != nil { - t.Errorf("err on deducing project root for %s: %s", id.errString(), err.Error()) - } + t.Run(fmt.Sprintf("deduce:%v:%s", opcount, id.errString()), func(t *testing.T) { + t.Parallel() + if _, err := sm.DeduceProjectRoot(string(id.ProjectRoot)); err != nil { + t.Error(err) + } + }) case 1: - t.Logf("syncing %s", id) - err := sm.SyncSourceFor(id) - if err != nil { - t.Errorf("syncing failed for %s with err %s", id.errString(), err.Error()) - } + t.Run(fmt.Sprintf("sync:%v:%s", opcount, id.errString()), func(t *testing.T) { + t.Parallel() + err := sm.SyncSourceFor(id) + if err != nil { + t.Error(err) + } + }) case 2: - t.Logf("listing versions for %s", id) - _, err := sm.ListVersions(id) - if err != nil { - t.Errorf("listing versions failed for %s with err %s", id.errString(), err.Error()) - } + t.Run(fmt.Sprintf("listVersions:%v:%s", opcount, id.errString()), func(t *testing.T) { + t.Parallel() + vl, err := sm.ListVersions(id) + if err != nil { + t.Fatal(err) + } + if len(vl) == 0 { + t.Error("no versions returned") + } + }) case 3: - t.Logf("Checking source existence for %s", id) - y, err := sm.SourceExists(id) - if err != nil { - t.Errorf("err on checking source existence for %s: %s", id.errString(), err.Error()) - } - if !y { - t.Errorf("claims %s source does not exist", id.errString()) - } + t.Run(fmt.Sprintf("exists:%v:%s", opcount, id.errString()), func(t *testing.T) { + t.Parallel() + y, err := sm.SourceExists(id) + if err != nil { + t.Fatal(err) + } + if !y { + t.Error("said source does not exist") + } + }) default: - 
panic(fmt.Sprintf("wtf, %s %v", id, pass)) + panic(fmt.Sprintf("wtf, %s %v", id, op)) } - wg.Done() - }(projects[i%len(projects)], (i/len(projects))%4) - - runtime.Gosched() - } - wg.Wait() + } + }) } sm, _ := mkNaiveSM(t) - do(sm) + do("first", sm) + // Run the thing twice with a remade sm so that we cover both the cases of - // pre-existing and new clones + // pre-existing and new clones. + // + // This triggers a release of the first sm, which is much of what we're + // testing here - that the release is complete and clean, and can be + // immediately followed by a new sm coming in. sm2, clean := remakeNaiveSM(sm, t) - do(sm2) + do("second", sm2) clean() } @@ -747,7 +683,7 @@ func TestErrAfterRelease(t *testing.T) { t.Errorf("ListPackages errored after Release(), but with unexpected error: %T %s", terr, terr.Error()) } - _, _, err = sm.GetManifestAndLock(id, nil) + _, _, err = sm.GetManifestAndLock(id, nil, naiveAnalyzer{}) if err == nil { t.Errorf("GetManifestAndLock did not error after calling Release()") } else if terr, ok := err.(smIsReleased); !ok { @@ -775,11 +711,6 @@ func TestSignalHandling(t *testing.T) { } sm, clean := mkNaiveSM(t) - //get self proc - proc, err := os.FindProcess(os.Getpid()) - if err != nil { - t.Fatal("cannot find self proc") - } sigch := make(chan os.Signal) sm.HandleSignals(sigch) @@ -797,71 +728,158 @@ func TestSignalHandling(t *testing.T) { } clean() + // Test again, this time with a running call sm, clean = mkNaiveSM(t) - sm.UseDefaultSignalHandling() - go sm.DeduceProjectRoot("rsc.io/pdf") - runtime.Gosched() + sm.HandleSignals(sigch) - // signal the process and call release right afterward - now := time.Now() - proc.Signal(os.Interrupt) - sigdur := time.Since(now) - t.Logf("time to send signal: %v", sigdur) - sm.Release() - reldur := time.Since(now) - sigdur - t.Logf("time to return from Release(): %v", reldur) + errchan := make(chan error) + go func() { + _, callerr := sm.DeduceProjectRoot("k8s.io/kubernetes") + errchan <- 
callerr + }() + go func() { sigch <- os.Interrupt }() + runtime.Gosched() - if reldur < 10*time.Millisecond { - t.Errorf("finished too fast (%v); the necessary network request could not have completed yet", reldur) + callerr := <-errchan + if callerr == nil { + t.Error("network call could not have completed before cancellation, should have gotten an error") } if atomic.LoadInt32(&sm.releasing) != 1 { t.Error("Releasing flag did not get set") } - - lpath = filepath.Join(sm.cachedir, "sm.lock") - if _, err := os.Stat(lpath); err == nil { - t.Error("Expected error on statting what should be an absent lock file") - } clean() sm, clean = mkNaiveSM(t) + // Ensure that handling also works after stopping and restarting itself, + // and that Release happens only once. sm.UseDefaultSignalHandling() sm.StopSignalHandling() - sm.UseDefaultSignalHandling() - - go sm.DeduceProjectRoot("rsc.io/pdf") - //runtime.Gosched() - // Ensure that it all works after teardown and re-set up - proc.Signal(os.Interrupt) - // Wait for twice the time it took to do it last time; should be safe - <-time.After(reldur * 2) + sm.HandleSignals(sigch) - // proc.Signal doesn't send for windows, so just force it - if runtime.GOOS == "windows" { + go func() { + _, callerr := sm.DeduceProjectRoot("k8s.io/kubernetes") + errchan <- callerr + }() + go func() { + sigch <- os.Interrupt sm.Release() - } + }() + runtime.Gosched() - if atomic.LoadInt32(&sm.releasing) != 1 { - t.Error("Releasing flag did not get set") + after := time.After(2 * time.Second) + select { + case <-sm.qch: + case <-after: + t.Error("did not shut down in reasonable time") } - lpath = filepath.Join(sm.cachedir, "sm.lock") - if _, err := os.Stat(lpath); err == nil { - t.Fatal("Expected error on statting what should be an absent lock file") - } clean() } func TestUnreachableSource(t *testing.T) { // If a git remote is unreachable (maybe the server is only accessible behind a VPN, or // something), we should return a clear error, not a panic. 
+ if testing.Short() { + t.Skip("Skipping slow test in short mode") + } sm, clean := mkNaiveSM(t) defer clean() - id := mkPI("golang.org/notareal/repo").normalize() - _, err := sm.ListVersions(id) + id := mkPI("github.com/golang/notexist").normalize() + err := sm.SyncSourceFor(id) if err == nil { t.Error("expected err when listing versions of a bogus source, but got nil") } } + +func TestSupervisor(t *testing.T) { + bgc := context.Background() + ctx, cancelFunc := context.WithCancel(bgc) + superv := newSupervisor(ctx) + + ci := callInfo{ + name: "foo", + typ: 0, + } + + _, err := superv.start(ci) + if err != nil { + t.Fatal("unexpected err on setUpCall:", err) + } + + tc, exists := superv.running[ci] + if !exists { + t.Fatal("running call not recorded in map") + } + + if tc.count != 1 { + t.Fatalf("wrong count of running ci: wanted 1 got %v", tc.count) + } + + // run another, but via do + block, wait := make(chan struct{}), make(chan struct{}) + go func() { + wait <- struct{}{} + err := superv.do(bgc, "foo", 0, func(ctx context.Context) error { + <-block + return nil + }) + if err != nil { + t.Fatal("unexpected err on do() completion:", err) + } + close(wait) + }() + <-wait + + superv.mu.Lock() + tc, exists = superv.running[ci] + if !exists { + t.Fatal("running call not recorded in map") + } + + if tc.count != 2 { + t.Fatalf("wrong count of running ci: wanted 2 got %v", tc.count) + } + superv.mu.Unlock() + + close(block) + <-wait + superv.mu.Lock() + if len(superv.ran) != 0 { + t.Fatal("should not record metrics until last one drops") + } + + tc, exists = superv.running[ci] + if !exists { + t.Fatal("running call not recorded in map") + } + + if tc.count != 1 { + t.Fatalf("wrong count of running ci: wanted 1 got %v", tc.count) + } + superv.mu.Unlock() + + superv.done(ci) + superv.mu.Lock() + ran, exists := superv.ran[0] + if !exists { + t.Fatal("should have metrics after closing last of a ci, but did not") + } + + if ran.count != 1 { + t.Fatalf("wrong count of 
serial runs of a call: wanted 1 got %v", ran.count) + } + superv.mu.Unlock() + + cancelFunc() + _, err = superv.start(ci) + if err == nil { + t.Fatal("should have errored on cm.run() after canceling cm's input context") + } + + superv.do(bgc, "foo", 0, func(ctx context.Context) error { + t.Fatal("calls should not be initiated by do() after main context is cancelled") + return nil + }) +} diff --git a/vendor/github.com/sdboyer/gps/maybe_source.go b/vendor/github.com/sdboyer/gps/maybe_source.go index 5e74ce95c0..d680937f7b 100644 --- a/vendor/github.com/sdboyer/gps/maybe_source.go +++ b/vendor/github.com/sdboyer/gps/maybe_source.go @@ -2,9 +2,11 @@ package gps import ( "bytes" + "context" "fmt" "net/url" "path/filepath" + "strings" "github.com/Masterminds/vcs" ) @@ -12,29 +14,41 @@ import ( // A maybeSource represents a set of information that, given some // typically-expensive network effort, could be transformed into a proper source. // -// Wrapping these up as their own type kills two birds with one stone: +// Wrapping these up as their own type achieves two goals: // // * Allows control over when deduction logic triggers network activity // * Makes it easy to attempt multiple URLs for a given import path type maybeSource interface { - try(cachedir string, an ProjectAnalyzer) (source, string, error) + try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) + getURL() string } type maybeSources []maybeSource -func (mbs maybeSources) try(cachedir string, an ProjectAnalyzer) (source, string, error) { +func (mbs maybeSources) try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) { var e sourceFailures for _, mb := range mbs { - src, ident, err := mb.try(cachedir, an) + src, state, err := mb.try(ctx, cachedir, c, superv) if err == nil { - return src, ident, nil + return src, state, nil } e = append(e, sourceSetupFailure{ - ident: 
ident, + ident: mb.getURL(), err: err, }) } - return nil, "", e + return nil, 0, e +} + +// This really isn't generally intended to be used - the interface is for +// maybeSources to be able to interrogate its members, not other things to +// interrogate a maybeSources. +func (mbs maybeSources) getURL() string { + strslice := make([]string, 0, len(mbs)) + for _, mb := range mbs { + strslice = append(strslice, mb.getURL()) + } + return strings.Join(strslice, "\n") } type sourceSetupFailure struct { @@ -62,34 +76,45 @@ type maybeGitSource struct { url *url.URL } -func (m maybeGitSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) { +func (m maybeGitSource) try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) { ustr := m.url.String() path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr)) + r, err := vcs.NewGitRepo(ustr, path) if err != nil { - return nil, ustr, unwrapVcsErr(err) + return nil, 0, unwrapVcsErr(err) } src := &gitSource{ baseVCSSource: baseVCSSource{ - an: an, - dc: newMetaCache(), - crepo: &repo{ - r: &gitRepo{r}, - rpath: path, - }, + repo: &gitRepo{r}, }, } - src.baseVCSSource.lvfunc = src.listVersions - if !r.CheckLocal() { - _, err = src.listVersions() - if err != nil { - return nil, ustr, unwrapVcsErr(err) + // Pinging invokes the same action as calling listVersions, so just do that. 
+ var vl []PairedVersion + err = superv.do(ctx, "git:lv:maybe", ctListVersions, func(ctx context.Context) (err error) { + if vl, err = src.listVersions(ctx); err != nil { + return fmt.Errorf("remote repository at %s does not exist, or is inaccessible", ustr) } + return nil + }) + if err != nil { + return nil, 0, err + } + + c.storeVersionMap(vl, true) + state := sourceIsSetUp | sourceExistsUpstream | sourceHasLatestVersionList + + if r.CheckLocal() { + state |= sourceExistsLocally } - return src, ustr, nil + return src, state, nil +} + +func (m maybeGitSource) getURL() string { + return m.url.String() } type maybeGopkginSource struct { @@ -104,106 +129,130 @@ type maybeGopkginSource struct { major uint64 } -func (m maybeGopkginSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) { +func (m maybeGopkginSource) try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) { // We don't actually need a fully consistent transform into the on-disk path // - just something that's unique to the particular gopkg.in domain context. // So, it's OK to just dumb-join the scheme with the path. 
path := filepath.Join(cachedir, "sources", sanitizer.Replace(m.url.Scheme+"/"+m.opath)) ustr := m.url.String() + r, err := vcs.NewGitRepo(ustr, path) if err != nil { - return nil, ustr, unwrapVcsErr(err) + return nil, 0, unwrapVcsErr(err) } src := &gopkginSource{ gitSource: gitSource{ baseVCSSource: baseVCSSource{ - an: an, - dc: newMetaCache(), - crepo: &repo{ - r: &gitRepo{r}, - rpath: path, - }, + repo: &gitRepo{r}, }, }, major: m.major, } - src.baseVCSSource.lvfunc = src.listVersions - if !r.CheckLocal() { - _, err = src.listVersions() - if err != nil { - return nil, ustr, unwrapVcsErr(err) + var vl []PairedVersion + err = superv.do(ctx, "git:lv:maybe", ctListVersions, func(ctx context.Context) (err error) { + if vl, err = src.listVersions(ctx); err != nil { + return fmt.Errorf("remote repository at %s does not exist, or is inaccessible", ustr) } + return nil + }) + if err != nil { + return nil, 0, err + } + + c.storeVersionMap(vl, true) + state := sourceIsSetUp | sourceExistsUpstream | sourceHasLatestVersionList + + if r.CheckLocal() { + state |= sourceExistsLocally } - return src, ustr, nil + return src, state, nil +} + +func (m maybeGopkginSource) getURL() string { + return m.opath } type maybeBzrSource struct { url *url.URL } -func (m maybeBzrSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) { +func (m maybeBzrSource) try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) { ustr := m.url.String() path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr)) + r, err := vcs.NewBzrRepo(ustr, path) if err != nil { - return nil, ustr, unwrapVcsErr(err) + return nil, 0, unwrapVcsErr(err) } - if !r.Ping() { - return nil, ustr, fmt.Errorf("remote repository at %s does not exist, or is inaccessible", ustr) + + err = superv.do(ctx, "bzr:ping", ctSourcePing, func(ctx context.Context) error { + if !r.Ping() { + return fmt.Errorf("remote repository at %s does not exist, or is 
inaccessible", ustr) + } + return nil + }) + if err != nil { + return nil, 0, err + } + + state := sourceIsSetUp | sourceExistsUpstream + if r.CheckLocal() { + state |= sourceExistsLocally } src := &bzrSource{ baseVCSSource: baseVCSSource{ - an: an, - dc: newMetaCache(), - ex: existence{ - s: existsUpstream, - f: existsUpstream, - }, - crepo: &repo{ - r: &bzrRepo{r}, - rpath: path, - }, + repo: &bzrRepo{r}, }, } - src.baseVCSSource.lvfunc = src.listVersions - return src, ustr, nil + return src, state, nil +} + +func (m maybeBzrSource) getURL() string { + return m.url.String() } type maybeHgSource struct { url *url.URL } -func (m maybeHgSource) try(cachedir string, an ProjectAnalyzer) (source, string, error) { +func (m maybeHgSource) try(ctx context.Context, cachedir string, c singleSourceCache, superv *supervisor) (source, sourceState, error) { ustr := m.url.String() path := filepath.Join(cachedir, "sources", sanitizer.Replace(ustr)) + r, err := vcs.NewHgRepo(ustr, path) if err != nil { - return nil, ustr, unwrapVcsErr(err) + return nil, 0, unwrapVcsErr(err) } - if !r.Ping() { - return nil, ustr, fmt.Errorf("remote repository at %s does not exist, or is inaccessible", ustr) + + err = superv.do(ctx, "hg:ping", ctSourcePing, func(ctx context.Context) error { + if !r.Ping() { + return fmt.Errorf("remote repository at %s does not exist, or is inaccessible", ustr) + } + return nil + }) + if err != nil { + return nil, 0, err + } + + state := sourceIsSetUp | sourceExistsUpstream + if r.CheckLocal() { + state |= sourceExistsLocally } src := &hgSource{ baseVCSSource: baseVCSSource{ - an: an, - dc: newMetaCache(), - ex: existence{ - s: existsUpstream, - f: existsUpstream, - }, - crepo: &repo{ - r: &hgRepo{r}, - rpath: path, - }, + repo: &hgRepo{r}, }, } - src.baseVCSSource.lvfunc = src.listVersions - return src, ustr, nil + return src, state, nil +} + +func (m maybeHgSource) getURL() string { + return m.url.String() } diff --git 
a/vendor/github.com/sdboyer/gps/pkgtree/pkgtree.go b/vendor/github.com/sdboyer/gps/pkgtree/pkgtree.go index 5717f0b267..746f16ab0d 100644 --- a/vendor/github.com/sdboyer/gps/pkgtree/pkgtree.go +++ b/vendor/github.com/sdboyer/gps/pkgtree/pkgtree.go @@ -195,6 +195,10 @@ func fillPackage(p *build.Package) error { var testImports []string var imports []string for _, file := range gofiles { + // Skip underscore-led files, in keeping with the rest of the toolchain. + if filepath.Base(file)[0] == '_' { + continue + } pf, err := parser.ParseFile(token.NewFileSet(), file, nil, parser.ImportsOnly|parser.ParseComments) if err != nil { if os.IsPermission(err) { diff --git a/vendor/github.com/sdboyer/gps/pkgtree/pkgtree_test.go b/vendor/github.com/sdboyer/gps/pkgtree/pkgtree_test.go index 2dce984286..7196ed160a 100644 --- a/vendor/github.com/sdboyer/gps/pkgtree/pkgtree_test.go +++ b/vendor/github.com/sdboyer/gps/pkgtree/pkgtree_test.go @@ -450,9 +450,12 @@ func TestWorkmapToReach(t *testing.T) { } for name, fix := range table { - // Avoid erroneous errors by initializing the fixture's error map if - // needed + name, fix := name, fix t.Run(name, func(t *testing.T) { + t.Parallel() + + // Avoid erroneous errors by initializing the fixture's error map if + // needed if fix.em == nil { fix.em = make(map[string]*ProblemImportError) } @@ -1150,6 +1153,26 @@ func TestListPackages(t *testing.T) { }, }, }, + "skip underscore": { + fileRoot: j("skip_"), + importRoot: "skip_", + out: PackageTree{ + ImportRoot: "skip_", + Packages: map[string]PackageOrErr{ + "skip_": { + P: Package{ + ImportPath: "skip_", + CommentPath: "", + Name: "skip", + Imports: []string{ + "github.com/sdboyer/gps", + "sort", + }, + }, + }, + }, + }, + }, // This case mostly exists for the PackageTree methods, but it does // cover a bit of range "varied": { @@ -1329,8 +1352,7 @@ func TestListPackagesNoPerms(t *testing.T) { } tmp, err := ioutil.TempDir("", "listpkgsnp") if 
err != nil { - t.Errorf("Failed to create temp dir: %s", err) - t.FailNow() + t.Fatalf("Failed to create temp dir: %s", err) } defer os.RemoveAll(tmp) @@ -1341,13 +1363,11 @@ func TestListPackagesNoPerms(t *testing.T) { // chmod the simple dir and m1p/b.go file so they can't be read err = os.Chmod(filepath.Join(workdir, "simple"), 0) if err != nil { - t.Error("Error while chmodding simple dir", err) - t.FailNow() + t.Fatalf("Error while chmodding simple dir: %s", err) } os.Chmod(filepath.Join(workdir, "m1p", "b.go"), 0) if err != nil { - t.Error("Error while chmodding b.go file", err) - t.FailNow() + t.Fatalf("Error while chmodding b.go file: %s", err) } want := PackageTree{ @@ -1375,12 +1395,10 @@ func TestListPackagesNoPerms(t *testing.T) { got, err := ListPackages(workdir, "ren") if err != nil { - t.Errorf("Unexpected err from ListPackages: %s", err) - t.FailNow() + t.Fatalf("Unexpected err from ListPackages: %s", err) } if want.ImportRoot != got.ImportRoot { - t.Errorf("Expected ImportRoot %s, got %s", want.ImportRoot, got.ImportRoot) - t.FailNow() + t.Fatalf("Expected ImportRoot %s, got %s", want.ImportRoot, got.ImportRoot) } if !reflect.DeepEqual(got, want) { diff --git a/vendor/github.com/sdboyer/gps/result_test.go b/vendor/github.com/sdboyer/gps/result_test.go index 1cf9273266..b5a59ec6bf 100644 --- a/vendor/github.com/sdboyer/gps/result_test.go +++ b/vendor/github.com/sdboyer/gps/result_test.go @@ -39,7 +39,9 @@ func init() { } } -func TestWriteDepTree(t *testing.T) { +func testWriteDepTree(t *testing.T) { + t.Parallel() + // This test is a bit slow, skip it on -short if testing.Short() { t.Skip("Skipping dep tree writing test in short mode") @@ -48,8 +50,7 @@ func TestWriteDepTree(t *testing.T) { tmp, err := ioutil.TempDir("", "writetree") if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - t.FailNow() + t.Fatalf("Failed to create temp dir: %s", err) } defer os.RemoveAll(tmp) @@ -74,6 +75,11 @@ func TestWriteDepTree(t 
*testing.T) { sm, clean := mkNaiveSM(t) defer clean() + // Trigger simultaneous fetch of all three to speed up test execution time + for _, p := range r.p { + go sm.SyncSourceFor(p.pi) + } + // nil lock/result should err immediately err = WriteDepTree(tmp, nil, sm, true) if err == nil { @@ -104,7 +110,7 @@ func BenchmarkCreateVendorTree(b *testing.B) { tmp := path.Join(os.TempDir(), "vsolvtest") clean := true - sm, err := NewSourceManager(naiveAnalyzer{}, path.Join(tmp, "cache")) + sm, err := NewSourceManager(path.Join(tmp, "cache")) if err != nil { b.Errorf("NewSourceManager errored unexpectedly: %q", err) clean = false diff --git a/vendor/github.com/sdboyer/gps/rootdata.go b/vendor/github.com/sdboyer/gps/rootdata.go index 79d838216b..9548ebad90 100644 --- a/vendor/github.com/sdboyer/gps/rootdata.go +++ b/vendor/github.com/sdboyer/gps/rootdata.go @@ -42,6 +42,9 @@ type rootdata struct { // A defensively copied instance of params.RootPackageTree rpt pkgtree.PackageTree + + // The ProjectAnalyzer to use for all GetManifestAndLock calls. + an ProjectAnalyzer } // externalImportList returns a list of the unique imports from the root data. 
diff --git a/vendor/github.com/sdboyer/gps/rootdata_test.go b/vendor/github.com/sdboyer/gps/rootdata_test.go index e3126322bc..15e7e7e634 100644 --- a/vendor/github.com/sdboyer/gps/rootdata_test.go +++ b/vendor/github.com/sdboyer/gps/rootdata_test.go @@ -12,12 +12,12 @@ func TestRootdataExternalImports(t *testing.T) { RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), + ProjectAnalyzer: naiveAnalyzer{}, } is, err := Prepare(params, newdepspecSM(fix.ds, nil)) if err != nil { - t.Errorf("Unexpected error while prepping solver: %s", err) - t.FailNow() + t.Fatalf("Unexpected error while prepping solver: %s", err) } rd := is.(*solver).rd @@ -65,12 +65,12 @@ func TestGetApplicableConstraints(t *testing.T) { RootDir: string(fix.ds[0].n), RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), + ProjectAnalyzer: naiveAnalyzer{}, } is, err := Prepare(params, newdepspecSM(fix.ds, nil)) if err != nil { - t.Errorf("Unexpected error while prepping solver: %s", err) - t.FailNow() + t.Fatalf("Unexpected error while prepping solver: %s", err) } rd := is.(*solver).rd diff --git a/vendor/github.com/sdboyer/gps/satisfy.go b/vendor/github.com/sdboyer/gps/satisfy.go index e2c8403534..19392ae9e6 100644 --- a/vendor/github.com/sdboyer/gps/satisfy.go +++ b/vendor/github.com/sdboyer/gps/satisfy.go @@ -82,7 +82,7 @@ func (s *solver) check(a atomWithPackages, pkgonly bool) error { // the constraints established by the current solution. func (s *solver) checkAtomAllowable(pa atom) error { constraint := s.sel.getConstraint(pa.id) - if s.b.matches(pa.id, constraint, pa.v) { + if s.vUnify.matches(pa.id, constraint, pa.v) { return nil } // TODO(sdboyer) collect constraint failure reason (wait...aren't we, below?) 
@@ -90,7 +90,7 @@ func (s *solver) checkAtomAllowable(pa atom) error { deps := s.sel.getDependenciesOn(pa.id) var failparent []dependency for _, dep := range deps { - if !s.b.matches(pa.id, dep.dep.Constraint, pa.v) { + if !s.vUnify.matches(pa.id, dep.dep.Constraint, pa.v) { s.fail(dep.depender.id) failparent = append(failparent, dep) } @@ -152,7 +152,7 @@ func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep complete constraint := s.sel.getConstraint(dep.Ident) // Ensure the constraint expressed by the dep has at least some possible // intersection with the intersection of existing constraints. - if s.b.matchesAny(dep.Ident, constraint, dep.Constraint) { + if s.vUnify.matchesAny(dep.Ident, constraint, dep.Constraint) { return nil } @@ -161,7 +161,7 @@ func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep complete var failsib []dependency var nofailsib []dependency for _, sibling := range siblings { - if !s.b.matchesAny(dep.Ident, sibling.dep.Constraint, dep.Constraint) { + if !s.vUnify.matchesAny(dep.Ident, sibling.dep.Constraint, dep.Constraint) { s.fail(sibling.depender.id) failsib = append(failsib, sibling) } else { @@ -183,7 +183,7 @@ func (s *solver) checkDepsConstraintsAllowable(a atomWithPackages, cdep complete func (s *solver) checkDepsDisallowsSelected(a atomWithPackages, cdep completeDep) error { dep := cdep.workingConstraint selected, exists := s.sel.selected(dep.Ident) - if exists && !s.b.matches(dep.Ident, dep.Constraint, selected.a.v) { + if exists && !s.vUnify.matches(dep.Ident, dep.Constraint, selected.a.v) { s.fail(dep.Ident) return &constraintNotAllowedFailure{ diff --git a/vendor/github.com/sdboyer/gps/selection.go b/vendor/github.com/sdboyer/gps/selection.go index d1fe95d785..89e72bbe62 100644 --- a/vendor/github.com/sdboyer/gps/selection.go +++ b/vendor/github.com/sdboyer/gps/selection.go @@ -3,7 +3,7 @@ package gps type selection struct { projects []selected deps 
map[ProjectRoot][]dependency - sm sourceBridge + vu versionUnifier } type selected struct { @@ -124,7 +124,7 @@ func (s *selection) getConstraint(id ProjectIdentifier) Constraint { // Start with the open set var ret Constraint = any for _, dep := range deps { - ret = s.sm.intersect(id, ret, dep.dep.Constraint) + ret = s.vu.intersect(id, ret, dep.dep.Constraint) } return ret diff --git a/vendor/github.com/sdboyer/gps/solve_basic_test.go b/vendor/github.com/sdboyer/gps/solve_basic_test.go index 575bfa54a0..a04d258943 100644 --- a/vendor/github.com/sdboyer/gps/solve_basic_test.go +++ b/vendor/github.com/sdboyer/gps/solve_basic_test.go @@ -1384,7 +1384,7 @@ func newdepspecSM(ds []depspec, ignore []string) *depspecSourceManager { } } -func (sm *depspecSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) { +func (sm *depspecSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) { // If the input version is a PairedVersion, look only at its top version, // not the underlying. 
This is generally consistent with the idea that, for // this class of lookup, the rev probably DOES exist, but upstream changed @@ -1405,10 +1405,6 @@ func (sm *depspecSourceManager) GetManifestAndLock(id ProjectIdentifier, v Versi return nil, nil, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v) } -func (sm *depspecSourceManager) AnalyzerInfo() (string, int) { - return "depspec-sm-builtin", 1 -} - func (sm *depspecSourceManager) ExternalReach(id ProjectIdentifier, v Version) (map[string][]string, error) { pid := pident{n: ProjectRoot(id.normalizedSource()), v: v} if m, exists := sm.rm[pid]; exists { @@ -1419,6 +1415,12 @@ func (sm *depspecSourceManager) ExternalReach(id ProjectIdentifier, v Version) ( func (sm *depspecSourceManager) ListPackages(id ProjectIdentifier, v Version) (pkgtree.PackageTree, error) { pid := pident{n: ProjectRoot(id.normalizedSource()), v: v} + if pv, ok := v.(PairedVersion); ok && pv.Underlying() == "FAKEREV" { + // An empty rev may come in here because that's what we produce in + // ListVersions(). If that's what we see, then just pretend like we have + // an unpaired. + pid.v = pv.Unpair() + } if r, exists := sm.rm[pid]; exists { return pkgtree.PackageTree{ @@ -1460,20 +1462,32 @@ func (sm *depspecSourceManager) ListPackages(id ProjectIdentifier, v Version) (p return pkgtree.PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", pid.n, v) } -func (sm *depspecSourceManager) ListVersions(id ProjectIdentifier) (pi []Version, err error) { +func (sm *depspecSourceManager) ListVersions(id ProjectIdentifier) ([]PairedVersion, error) { + var pvl []PairedVersion for _, ds := range sm.specs { - // To simulate the behavior of the real SourceManager, we do not return - // revisions from ListVersions(). 
- if _, isrev := ds.v.(Revision); !isrev && id.normalizedSource() == string(ds.n) { - pi = append(pi, ds.v) + if id.normalizedSource() != string(ds.n) { + continue } - } - if len(pi) == 0 { - err = fmt.Errorf("Project %s could not be found", id.errString()) + switch tv := ds.v.(type) { + case Revision: + // To simulate the behavior of the real SourceManager, we do not return + // raw revisions from listVersions(). + case PairedVersion: + pvl = append(pvl, tv) + case UnpairedVersion: + // Dummy revision; if the fixture doesn't provide it, we know + // the test doesn't need revision info, anyway. + pvl = append(pvl, tv.Is(Revision("FAKEREV"))) + default: + panic(fmt.Sprintf("unreachable: type of version was %#v for spec %s", ds.v, id.errString())) + } } - return + if len(pvl) == 0 { + return nil, fmt.Errorf("Project %s could not be found", id.errString()) + } + return pvl, nil } func (sm *depspecSourceManager) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, error) { @@ -1536,6 +1550,37 @@ type depspecBridge struct { *bridge } +func (b *depspecBridge) listVersions(id ProjectIdentifier) ([]Version, error) { + if vl, exists := b.vlists[id]; exists { + return vl, nil + } + + pvl, err := b.sm.ListVersions(id) + if err != nil { + return nil, err + } + + // Construct a []Version slice. If any paired versions use the fake rev, + // remove the underlying component. 
+ vl := make([]Version, 0, len(pvl)) + for _, v := range pvl { + if v.Underlying() == "FAKEREV" { + vl = append(vl, v.Unpair()) + } else { + vl = append(vl, v) + } + } + + if b.down { + SortForDowngrade(vl) + } else { + SortForUpgrade(vl) + } + + b.vlists[id] = vl + return vl, nil +} + // override verifyRoot() on bridge to prevent any filesystem interaction func (b *depspecBridge) verifyRootDir(path string) error { root := b.sm.(fixSM).rootSpec() diff --git a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go index 48f63a404b..5b5927d452 100644 --- a/vendor/github.com/sdboyer/gps/solve_bimodal_test.go +++ b/vendor/github.com/sdboyer/gps/solve_bimodal_test.go @@ -1134,7 +1134,7 @@ func (sm *bmSourceManager) ListPackages(id ProjectIdentifier, v Version) (pkgtre return pkgtree.PackageTree{}, fmt.Errorf("Project %s at version %s could not be found", id.errString(), v) } -func (sm *bmSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) { +func (sm *bmSourceManager) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) { for _, ds := range sm.specs { if id.normalizedSource() == string(ds.n) && v.Matches(ds.v) { if l, exists := sm.lm[id.normalizedSource()+" "+v.String()]; exists { diff --git a/vendor/github.com/sdboyer/gps/solve_test.go b/vendor/github.com/sdboyer/gps/solve_test.go index 8fc4161b96..a7a7d2371e 100644 --- a/vendor/github.com/sdboyer/gps/solve_test.go +++ b/vendor/github.com/sdboyer/gps/solve_test.go @@ -104,6 +104,7 @@ func TestBasicSolves(t *testing.T) { sort.Strings(names) for _, n := range names { t.Run(n, func(t *testing.T) { + //t.Parallel() // until trace output is fixed in parallel solveBasicsAndCheck(basicFixtures[n], t) }) } @@ -121,6 +122,7 @@ func solveBasicsAndCheck(fix basicFixture, t *testing.T) (res Solution, err erro Downgrade: fix.downgrade, ChangeAll: fix.changeall, 
ToChange: fix.changelist, + ProjectAnalyzer: naiveAnalyzer{}, } if fix.l != nil { @@ -150,6 +152,7 @@ func TestBimodalSolves(t *testing.T) { sort.Strings(names) for _, n := range names { t.Run(n, func(t *testing.T) { + //t.Parallel() // until trace output is fixed in parallel solveBimodalAndCheck(bimodalFixtures[n], t) }) } @@ -166,6 +169,7 @@ func solveBimodalAndCheck(fix bimodalFixture, t *testing.T) (res Solution, err e Lock: dummyLock{}, Downgrade: fix.downgrade, ChangeAll: fix.changeall, + ProjectAnalyzer: naiveAnalyzer{}, } if fix.l != nil { @@ -196,15 +200,15 @@ func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing. fixfail := fix.failure() if err != nil { if fixfail == nil { - t.Errorf("(fixture: %q) Solve failed unexpectedly:\n%s", fix.name(), err) + t.Errorf("Solve failed unexpectedly:\n%s", err) } else if !reflect.DeepEqual(fixfail, err) { // TODO(sdboyer) reflect.DeepEqual works for now, but once we start // modeling more complex cases, this should probably become more robust - t.Errorf("(fixture: %q) Failure mismatch:\n\t(GOT): %s\n\t(WNT): %s", fix.name(), err, fixfail) + t.Errorf("Failure mismatch:\n\t(GOT): %s\n\t(WNT): %s", err, fixfail) } } else if fixfail != nil { var buf bytes.Buffer - fmt.Fprintf(&buf, "(fixture: %q) Solver succeeded, but expecting failure:\n%s\nProjects in solution:", fix.name(), fixfail) + fmt.Fprintf(&buf, "Solver succeeded, but expecting failure:\n%s\nProjects in solution:", fixfail) for _, p := range soln.Projects() { fmt.Fprintf(&buf, "\n\t- %s at %s", ppi(p.Ident()), p.Version()) } @@ -212,7 +216,7 @@ func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing. 
} else { r := soln.(solution) if fix.maxTries() > 0 && r.Attempts() > fix.maxTries() { - t.Errorf("(fixture: %q) Solver completed in %v attempts, but expected %v or fewer", fix.name(), r.att, fix.maxTries()) + t.Errorf("Solver completed in %v attempts, but expected %v or fewer", r.att, fix.maxTries()) } // Dump result projects into a map for easier interrogation @@ -224,23 +228,23 @@ func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing. fixlen, rlen := len(fix.solution()), len(rp) if fixlen != rlen { // Different length, so they definitely disagree - t.Errorf("(fixture: %q) Solver reported %v package results, result expected %v", fix.name(), rlen, fixlen) + t.Errorf("Solver reported %v package results, result expected %v", rlen, fixlen) } // Whether or not len is same, still have to verify that results agree // Walk through fixture/expected results first for id, flp := range fix.solution() { if lp, exists := rp[id]; !exists { - t.Errorf("(fixture: %q) Project %q expected but missing from results", fix.name(), ppi(id)) + t.Errorf("Project %q expected but missing from results", ppi(id)) } else { // delete result from map so we skip it on the reverse pass delete(rp, id) if flp.Version() != lp.Version() { - t.Errorf("(fixture: %q) Expected version %q of project %q, but actual version was %q", fix.name(), pv(flp.Version()), ppi(id), pv(lp.Version())) + t.Errorf("Expected version %q of project %q, but actual version was %q", pv(flp.Version()), ppi(id), pv(lp.Version())) } if !reflect.DeepEqual(lp.pkgs, flp.pkgs) { - t.Errorf("(fixture: %q) Package list was not not as expected for project %s@%s:\n\t(GOT) %s\n\t(WNT) %s", fix.name(), ppi(id), pv(lp.Version()), lp.pkgs, flp.pkgs) + t.Errorf("Package list was not not as expected for project %s@%s:\n\t(GOT) %s\n\t(WNT) %s", ppi(id), pv(lp.Version()), lp.pkgs, flp.pkgs) } } } @@ -248,7 +252,7 @@ func fixtureSolveSimpleChecks(fix specfix, soln Solution, err error, t *testing. 
// Now walk through remaining actual results for id, lp := range rp { if _, exists := fix.solution()[id]; !exists { - t.Errorf("(fixture: %q) Unexpected project %s@%s present in results, with pkgs:\n\t%s", fix.name(), ppi(id), pv(lp.Version()), lp.pkgs) + t.Errorf("Unexpected project %s@%s present in results, with pkgs:\n\t%s", ppi(id), pv(lp.Version()), lp.pkgs) } } } @@ -297,6 +301,7 @@ func TestRootLockNoVersionPairMatching(t *testing.T) { RootPackageTree: fix.rootTree(), Manifest: fix.rootmanifest(), Lock: l2, + ProjectAnalyzer: naiveAnalyzer{}, } res, err := fixSolve(params, sm, t) @@ -321,6 +326,14 @@ func TestBadSolveOpts(t *testing.T) { t.Error("Prepare should have given error on nil SourceManager, but gave:", err) } + _, err = Prepare(params, sm) + if err == nil { + t.Errorf("Prepare should have errored without ProjectAnalyzer") + } else if !strings.Contains(err.Error(), "must provide a ProjectAnalyzer") { + t.Error("Prepare should have given error without ProjectAnalyzer, but gave:", err) + } + + params.ProjectAnalyzer = naiveAnalyzer{} _, err = Prepare(params, sm) if err == nil { t.Errorf("Prepare should have errored on empty root") diff --git a/vendor/github.com/sdboyer/gps/solver.go b/vendor/github.com/sdboyer/gps/solver.go index 3d3d8240b2..5ecb1d4b13 100644 --- a/vendor/github.com/sdboyer/gps/solver.go +++ b/vendor/github.com/sdboyer/gps/solver.go @@ -49,6 +49,13 @@ type SolveParameters struct { // A real path to a readable directory is required. RootDir string + // The ProjectAnalyzer is responsible for extracting Manifest and + // (optionally) Lock information from dependencies. The solver passes it + // along to its SourceManager's GetManifestAndLock() method as needed. + // + // An analyzer is required. + ProjectAnalyzer ProjectAnalyzer + // The tree of packages that comprise the root project, as well as the // import path that should identify the root of that tree. 
// @@ -120,6 +127,10 @@ type solver struct { // names a SourceManager operates on. b sourceBridge + // A versionUnifier, to facilitate cross-type version comparison and set + // operations. + vUnify versionUnifier + // A stack containing projects and packages that are currently "selected" - // that is, they have passed all satisfiability checks, and are part of the // current solution. @@ -155,6 +166,9 @@ type solver struct { } func (params SolveParameters) toRootdata() (rootdata, error) { + if params.ProjectAnalyzer == nil { + return rootdata{}, badOptsFailure("must provide a ProjectAnalyzer") + } if params.RootDir == "" { return rootdata{}, badOptsFailure("params must specify a non-empty root directory") } @@ -181,6 +195,7 @@ func (params SolveParameters) toRootdata() (rootdata, error) { rlm: make(map[ProjectRoot]LockedProject), chngall: params.ChangeAll, dir: params.RootDir, + an: params.ProjectAnalyzer, } // Ensure the required, ignore and overrides maps are at least initialized @@ -284,11 +299,14 @@ func Prepare(params SolveParameters, sm SourceManager) (Solver, error) { if err != nil { return nil, err } + s.vUnify = versionUnifier{ + b: s.b, + } // Initialize stacks and queues s.sel = &selection{ deps: make(map[ProjectRoot][]dependency), - sm: s.b, + vu: s.vUnify, } s.unsel = &unselected{ sl: make([]bimodalIdentifier, 0), @@ -326,6 +344,7 @@ type Solver interface { func (s *solver) Solve() (Solution, error) { // Set up a metrics object s.mtr = newMetrics() + s.vUnify.mtr = s.mtr // Prime the queues with the root project err := s.selectRoot() @@ -512,7 +531,7 @@ func (s *solver) getImportsAndConstraintsOf(a atomWithPackages) ([]string, []com // Work through the source manager to get project info and static analysis // information. 
- m, _, err := s.b.GetManifestAndLock(a.a.id, a.a.v) + m, _, err := s.b.GetManifestAndLock(a.a.id, a.a.v, s.rd.an) if err != nil { return nil, nil, err } @@ -699,7 +718,7 @@ func (s *solver) createVersionQueue(bmi bimodalIdentifier) (*versionQueue, error continue } - _, l, err := s.b.GetManifestAndLock(dep.depender.id, dep.depender.v) + _, l, err := s.b.GetManifestAndLock(dep.depender.id, dep.depender.v, s.rd.an) if err != nil || l == nil { // err being non-nil really shouldn't be possible, but the lock // being nil is quite likely @@ -865,7 +884,7 @@ func (s *solver) getLockVersionIfValid(id ProjectIdentifier) (Version, error) { if tv, ok := v.(Revision); ok { // If we only have a revision from the root's lock, allow matching // against other versions that have that revision - for _, pv := range s.b.pairRevision(id, tv) { + for _, pv := range s.vUnify.pairRevision(id, tv) { if constraint.Matches(pv) { v = pv found = true @@ -1027,11 +1046,11 @@ func (s *solver) unselectedComparator(i, j int) bool { // way avoid that call when making a version queue, we know we're gonna have // to pay that cost anyway. - // We can safely ignore an err from ListVersions here because, if there is + // We can safely ignore an err from listVersions here because, if there is // an actual problem, it'll be noted and handled somewhere else saner in the // solving algorithm. - ivl, _ := s.b.ListVersions(iname) - jvl, _ := s.b.ListVersions(jname) + ivl, _ := s.b.listVersions(iname) + jvl, _ := s.b.listVersions(jname) iv, jv := len(ivl), len(jvl) // Packages with fewer versions to pick from are less likely to benefit from @@ -1096,7 +1115,7 @@ func (s *solver) selectAtom(a atomWithPackages, pkgonly bool) { // TODO(sdboyer) making this call here could be the first thing to trigger // network activity...maybe? if so, can we mitigate by deferring the work to // queue consumption time? 
- _, l, _ := s.b.GetManifestAndLock(a.a.id, a.a.v) + _, l, _ := s.b.GetManifestAndLock(a.a.id, a.a.v, s.rd.an) var lmap map[ProjectIdentifier]Version if l != nil { lmap = make(map[ProjectIdentifier]Version) diff --git a/vendor/github.com/sdboyer/gps/source.go b/vendor/github.com/sdboyer/gps/source.go index 075c8cfd48..4031e5994b 100644 --- a/vendor/github.com/sdboyer/gps/source.go +++ b/vendor/github.com/sdboyer/gps/source.go @@ -1,450 +1,502 @@ package gps import ( + "context" + "errors" "fmt" - "os" - "path/filepath" "sync" "github.com/sdboyer/gps/pkgtree" ) -// sourceExistence values represent the extent to which a project "exists." -type sourceExistence uint8 +// sourceState represent the states that a source can be in, depending on how +// much search and discovery work ahs been done by a source's managing gateway. +// +// These are basically used to achieve a cheap approximation of a FSM. +type sourceState int32 const ( - // ExistsInVendorRoot indicates that a project exists in a vendor directory - // at the predictable location based on import path. It does NOT imply, much - // less guarantee, any of the following: - // - That the code at the expected location under vendor is at the version - // given in a lock file - // - That the code at the expected location under vendor is from the - // expected upstream project at all - // - That, if this flag is not present, the project does not exist at some - // unexpected/nested location under vendor - // - That the full repository history is available. In fact, the - // assumption should be that if only this flag is on, the full repository - // history is likely not available (locally) - // - // In short, the information encoded in this flag should not be construed as - // exhaustive. - existsInVendorRoot sourceExistence = 1 << iota - - // ExistsInCache indicates that a project exists on-disk in the local cache. 
- // It does not guarantee that an upstream exists, thus it cannot imply - // that the cache is at all correct - up-to-date, or even of the expected - // upstream project repository. - // - // Additionally, this refers only to the existence of the local repository - // itself; it says nothing about the existence or completeness of the - // separate metadata cache. - existsInCache - - // ExistsUpstream indicates that a project repository was locatable at the - // path provided by a project's URI (a base import path). - existsUpstream + sourceIsSetUp sourceState = 1 << iota + sourceExistsUpstream + sourceExistsLocally + sourceHasLatestVersionList + sourceHasLatestLocally ) -type source interface { - syncLocal() error - checkExistence(sourceExistence) bool - exportVersionTo(Version, string) error - getManifestAndLock(ProjectRoot, Version) (Manifest, Lock, error) - listPackages(ProjectRoot, Version) (pkgtree.PackageTree, error) - listVersions() ([]Version, error) - revisionPresentIn(Revision) (bool, error) +type srcReturnChans struct { + ret chan *sourceGateway + err chan error } -type sourceMetaCache struct { - //Version string // TODO(sdboyer) use this - infos map[Revision]projectInfo - ptrees map[Revision]pkgtree.PackageTree - vMap map[UnpairedVersion]Revision - rMap map[Revision][]UnpairedVersion - // TODO(sdboyer) mutexes. 
actually probably just one, b/c complexity -} - -// projectInfo holds manifest and lock -type projectInfo struct { - Manifest - Lock +func (rc srcReturnChans) awaitReturn() (sg *sourceGateway, err error) { + select { + case sg = <-rc.ret: + case err = <-rc.err: + } + return } -type existence struct { - // The existence levels for which a search/check has been performed - s sourceExistence - - // The existence levels verified to be present through searching - f sourceExistence +type sourceCoordinator struct { + supervisor *supervisor + srcmut sync.RWMutex // guards srcs and nameToURL maps + srcs map[string]*sourceGateway + nameToURL map[string]string + psrcmut sync.Mutex // guards protoSrcs map + protoSrcs map[string][]srcReturnChans + deducer deducer + cachedir string } -func newMetaCache() *sourceMetaCache { - return &sourceMetaCache{ - infos: make(map[Revision]projectInfo), - ptrees: make(map[Revision]pkgtree.PackageTree), - vMap: make(map[UnpairedVersion]Revision), - rMap: make(map[Revision][]UnpairedVersion), +func newSourceCoordinator(superv *supervisor, deducer deducer, cachedir string) *sourceCoordinator { + return &sourceCoordinator{ + supervisor: superv, + deducer: deducer, + cachedir: cachedir, + srcs: make(map[string]*sourceGateway), + nameToURL: make(map[string]string), + protoSrcs: make(map[string][]srcReturnChans), } } -type baseVCSSource struct { - // Object for the cache repository - crepo *repo - - // Indicates the extent to which we have searched for, and verified, the - // existence of the project/repo. - ex existence +func (sc *sourceCoordinator) getSourceGatewayFor(ctx context.Context, id ProjectIdentifier) (*sourceGateway, error) { + if sc.supervisor.getLifetimeContext().Err() != nil { + return nil, errors.New("sourceCoordinator has been terminated") + } - // ProjectAnalyzer used to fulfill getManifestAndLock - an ProjectAnalyzer + normalizedName := id.normalizedSource() - // The project metadata cache. 
This is (or is intended to be) persisted to - // disk, for reuse across solver runs. - dc *sourceMetaCache + sc.srcmut.RLock() + if url, has := sc.nameToURL[normalizedName]; has { + srcGate, has := sc.srcs[url] + sc.srcmut.RUnlock() + if has { + return srcGate, nil + } + panic(fmt.Sprintf("%q was URL for %q in nameToURL, but no corresponding srcGate in srcs map", url, normalizedName)) + } + sc.srcmut.RUnlock() - // lvfunc allows the other vcs source types that embed this type to inject - // their listVersions func into the baseSource, for use as needed. - lvfunc func() (vlist []Version, err error) + // No gateway exists for this path yet; set up a proto, being careful to fold + // together simultaneous attempts on the same path. + rc := srcReturnChans{ + ret: make(chan *sourceGateway), + err: make(chan error), + } - // Mutex to ensure only one listVersions runs at a time - // - // TODO(sdboyer) this is a horrible one-off hack, and must be removed once - // source managers are refactored to properly serialize and fold-in calls to - // these methods. - lvmut sync.Mutex + // The rest of the work needs its own goroutine, the results of which will + // be re-joined to this call via the return chans. + go sc.setUpSourceGateway(ctx, normalizedName, rc) + return rc.awaitReturn() +} - // Once-er to control access to syncLocal - synconce sync.Once +// Not intended to be called externally - call getSourceGatewayFor instead. +func (sc *sourceCoordinator) setUpSourceGateway(ctx context.Context, normalizedName string, rc srcReturnChans) { + sc.psrcmut.Lock() + if chans, has := sc.protoSrcs[normalizedName]; has { + // Another goroutine is already working on this normalizedName. Fold + // in with that work by attaching our return channels to the list. 
+ sc.protoSrcs[normalizedName] = append(chans, rc) + sc.psrcmut.Unlock() + return + } - // The error, if any, that occurred on syncLocal - syncerr error + sc.protoSrcs[normalizedName] = []srcReturnChans{rc} + sc.psrcmut.Unlock() - // Whether the cache has the latest info on versions - cvsync bool -} + doReturn := func(sg *sourceGateway, err error) { + sc.psrcmut.Lock() + if sg != nil { + for _, rc := range sc.protoSrcs[normalizedName] { + rc.ret <- sg + } + } else if err != nil { + for _, rc := range sc.protoSrcs[normalizedName] { + rc.err <- err + } + } else { + panic("sg and err both nil") + } -func (bs *baseVCSSource) getManifestAndLock(r ProjectRoot, v Version) (Manifest, Lock, error) { - if err := bs.ensureCacheExistence(); err != nil { - return nil, nil, err + delete(sc.protoSrcs, normalizedName) + sc.psrcmut.Unlock() } - rev, err := bs.toRevOrErr(v) + pd, err := sc.deducer.deduceRootPath(ctx, normalizedName) if err != nil { - return nil, nil, err + // As in the deducer, don't cache errors so that externally-driven retry + // strategies can be constructed. + doReturn(nil, err) + return } - // Return the info from the cache, if we already have it - if pi, exists := bs.dc.infos[rev]; exists { - return pi.Manifest, pi.Lock, nil + // It'd be quite the feat - but not impossible - for a gateway + // corresponding to this normalizedName to have slid into the main + // sources map after the initial unlock, but before this goroutine got + // scheduled. Guard against that by checking the main sources map again + // and bailing out if we find an entry. + var srcGate *sourceGateway + sc.srcmut.RLock() + if url, has := sc.nameToURL[normalizedName]; has { + if srcGate, has := sc.srcs[url]; has { + sc.srcmut.RUnlock() + doReturn(srcGate, nil) + return + } + panic(fmt.Sprintf("%q was URL for %q in nameToURL, but no corresponding srcGate in srcs map", url, normalizedName)) } + sc.srcmut.RUnlock() - // Cache didn't help; ensure our local is fully up to date. 
- do := func() (err error) { - bs.crepo.mut.Lock() - // Always prefer a rev, if it's available - if pv, ok := v.(PairedVersion); ok { - err = bs.crepo.r.UpdateVersion(pv.Underlying().String()) - } else { - err = bs.crepo.r.UpdateVersion(v.String()) - } + srcGate = newSourceGateway(pd.mb, sc.supervisor, sc.cachedir) - bs.crepo.mut.Unlock() + // The normalized name is usually different from the source URL- e.g. + // github.com/sdboyer/gps vs. https://github.com/sdboyer/gps. But it's + // possible to arrive here with a full URL as the normalized name - and + // both paths *must* lead to the same sourceGateway instance in order to + // ensure disk access is correctly managed. + // + // Therefore, we now must query the sourceGateway to get the actual + // sourceURL it's operating on, and ensure it's *also* registered at + // that path in the map. This will cause it to actually initiate the + // maybeSource.try() behavior in order to settle on a URL. + url, err := srcGate.sourceURL(ctx) + if err != nil { + doReturn(nil, err) return } - if err = do(); err != nil { - // minimize network activity: only force local syncing if we had an err - err = bs.syncLocal() - if err != nil { - return nil, nil, err - } + // We know we have a working srcGateway at this point, and need to + // integrate it back into the main map. + sc.srcmut.Lock() + defer sc.srcmut.Unlock() + // Record the name -> URL mapping, even if it's a self-mapping. + sc.nameToURL[normalizedName] = url - if err = do(); err != nil { - // TODO(sdboyer) More-er proper-er error - panic(fmt.Sprintf("canary - why is checkout/whatever failing: %s %s %s", bs.crepo.r.LocalPath(), v.String(), unwrapVcsErr(err))) - } + if sa, has := sc.srcs[url]; has { + // URL already had an entry in the main map; use that as the result. 
+ doReturn(sa, nil) + return } - bs.crepo.mut.RLock() - m, l, err := bs.an.DeriveManifestAndLock(bs.crepo.r.LocalPath(), r) - // TODO(sdboyer) cache results - bs.crepo.mut.RUnlock() + sc.srcs[url] = srcGate + doReturn(srcGate, nil) +} - if err == nil { - if l != nil { - l = prepLock(l) - } +// sourceGateways manage all incoming calls for data from sources, serializing +// and caching them as needed. +type sourceGateway struct { + cachedir string + maybe maybeSource + srcState sourceState + src source + cache singleSourceCache + mu sync.Mutex // global lock, serializes all behaviors + suprvsr *supervisor +} - // If m is nil, prepManifest will provide an empty one. - pi := projectInfo{ - Manifest: prepManifest(m), - Lock: l, - } +func newSourceGateway(maybe maybeSource, superv *supervisor, cachedir string) *sourceGateway { + sg := &sourceGateway{ + maybe: maybe, + cachedir: cachedir, + suprvsr: superv, + } + sg.cache = sg.createSingleSourceCache() - bs.dc.infos[rev] = pi + return sg +} - return pi.Manifest, pi.Lock, nil - } +func (sg *sourceGateway) syncLocal(ctx context.Context) error { + sg.mu.Lock() + defer sg.mu.Unlock() - return nil, nil, unwrapVcsErr(err) + _, err := sg.require(ctx, sourceIsSetUp|sourceExistsLocally|sourceHasLatestLocally) + return err } -// toRevision turns a Version into a Revision, if doing so is possible based on -// the information contained in the version itself, or in the cache maps. -func (dc *sourceMetaCache) toRevision(v Version) Revision { - switch t := v.(type) { - case Revision: - return t - case PairedVersion: - return t.Underlying() - case UnpairedVersion: - // This will return the empty rev (empty string) if we don't have a - // record of it. It's up to the caller to decide, for example, if - // it's appropriate to update the cache. 
- return dc.vMap[t] - default: - panic(fmt.Sprintf("Unknown version type %T", v)) +func (sg *sourceGateway) existsInCache(ctx context.Context) bool { + sg.mu.Lock() + defer sg.mu.Unlock() + + _, err := sg.require(ctx, sourceIsSetUp|sourceExistsLocally) + if err != nil { + return false } + + return sg.srcState&sourceExistsLocally != 0 } -// toUnpaired turns a Version into an UnpairedVersion, if doing so is possible -// based on the information contained in the version itself, or in the cache -// maps. -// -// If the input is a revision and multiple UnpairedVersions are associated with -// it, whatever happens to be the first is returned. -func (dc *sourceMetaCache) toUnpaired(v Version) UnpairedVersion { - switch t := v.(type) { - case UnpairedVersion: - return t - case PairedVersion: - return t.Unpair() - case Revision: - if upv, has := dc.rMap[t]; has && len(upv) > 0 { - return upv[0] - } - return nil - default: - panic(fmt.Sprintf("unknown version type %T", v)) +func (sg *sourceGateway) existsUpstream(ctx context.Context) bool { + sg.mu.Lock() + defer sg.mu.Unlock() + + _, err := sg.require(ctx, sourceIsSetUp|sourceExistsUpstream) + if err != nil { + return false } + + return sg.srcState&sourceExistsUpstream != 0 } -func (bs *baseVCSSource) revisionPresentIn(r Revision) (bool, error) { - // First and fastest path is to check the data cache to see if the rev is - // present. This could give us false positives, but the cases where that can - // occur would require a type of cache staleness that seems *exceedingly* - // unlikely to occur. 
- if _, has := bs.dc.infos[r]; has { - return true, nil - } else if _, has := bs.dc.rMap[r]; has { - return true, nil +func (sg *sourceGateway) exportVersionTo(ctx context.Context, v Version, to string) error { + sg.mu.Lock() + defer sg.mu.Unlock() + + _, err := sg.require(ctx, sourceIsSetUp|sourceExistsLocally) + if err != nil { + return err } - err := bs.ensureCacheExistence() + r, err := sg.convertToRevision(ctx, v) if err != nil { - return false, err + return err } - bs.crepo.mut.RLock() - defer bs.crepo.mut.RUnlock() - return bs.crepo.r.IsReference(string(r)), nil + return sg.suprvsr.do(ctx, sg.src.upstreamURL(), ctExportTree, func(ctx context.Context) error { + return sg.src.exportRevisionTo(ctx, r, to) + }) } -func (bs *baseVCSSource) ensureCacheExistence() error { - // Technically, methods could could attempt to return straight from the - // metadata cache even if the repo cache doesn't exist on disk. But that - // would allow weird state inconsistencies (cache exists, but no repo...how - // does that even happen?) that it'd be better to just not allow so that we - // don't have to think about it elsewhere - if !bs.checkExistence(existsInCache) { - if bs.checkExistence(existsUpstream) { - bs.crepo.mut.Lock() - if bs.crepo.synced { - // A second ensure call coming in while the first is completing - // isn't terribly unlikely, especially for a large repo. In that - // event, the synced flag will have flipped on by the time we - // acquire the lock. If it has, there's no need to do this work - // twice. 
- bs.crepo.mut.Unlock() - return nil - } +func (sg *sourceGateway) getManifestAndLock(ctx context.Context, pr ProjectRoot, v Version, an ProjectAnalyzer) (Manifest, Lock, error) { + sg.mu.Lock() + defer sg.mu.Unlock() - err := bs.crepo.r.Get() + r, err := sg.convertToRevision(ctx, v) + if err != nil { + return nil, nil, err + } - if err != nil { - bs.crepo.mut.Unlock() - return fmt.Errorf("failed to create repository cache for %s with err:\n%s", bs.crepo.r.Remote(), unwrapVcsErr(err)) - } + m, l, has := sg.cache.getManifestAndLock(r, an) + if has { + return m, l, nil + } - bs.crepo.synced = true - bs.ex.s |= existsInCache - bs.ex.f |= existsInCache - bs.crepo.mut.Unlock() - } else { - return fmt.Errorf("project %s does not exist upstream", bs.crepo.r.Remote()) - } + _, err = sg.require(ctx, sourceIsSetUp|sourceExistsLocally) + if err != nil { + return nil, nil, err + } + + name, vers := an.Info() + label := fmt.Sprintf("%s:%s.%v", sg.src.upstreamURL(), name, vers) + err = sg.suprvsr.do(ctx, label, ctGetManifestAndLock, func(ctx context.Context) error { + m, l, err = sg.src.getManifestAndLock(ctx, pr, r, an) + return err + }) + if err != nil { + return nil, nil, err } - return nil + sg.cache.setManifestAndLock(r, an, m, l) + return m, l, nil } -// checkExistence provides a direct method for querying existence levels of the -// source. It will only perform actual searching (local fs or over the network) -// if no previous attempt at that search has been made. -// -// Note that this may perform read-ish operations on the cache repo, and it -// takes a lock accordingly. This makes it unsafe to call from a segment where -// the cache repo mutex is already write-locked, as deadlock will occur. 
-func (bs *baseVCSSource) checkExistence(ex sourceExistence) bool { - if bs.ex.s&ex != ex { - if ex&existsInVendorRoot != 0 && bs.ex.s&existsInVendorRoot == 0 { - panic("should now be implemented in bridge") - } - if ex&existsInCache != 0 && bs.ex.s&existsInCache == 0 { - bs.crepo.mut.RLock() - bs.ex.s |= existsInCache - if bs.crepo.r.CheckLocal() { - bs.ex.f |= existsInCache - } - bs.crepo.mut.RUnlock() - } - if ex&existsUpstream != 0 && bs.ex.s&existsUpstream == 0 { - bs.crepo.mut.RLock() - bs.ex.s |= existsUpstream - if bs.crepo.r.Ping() { - bs.ex.f |= existsUpstream - } - bs.crepo.mut.RUnlock() - } +// FIXME ProjectRoot input either needs to parameterize the cache, or be +// incorporated on the fly on egress...? +func (sg *sourceGateway) listPackages(ctx context.Context, pr ProjectRoot, v Version) (pkgtree.PackageTree, error) { + sg.mu.Lock() + defer sg.mu.Unlock() + + r, err := sg.convertToRevision(ctx, v) + if err != nil { + return pkgtree.PackageTree{}, err + } + + ptree, has := sg.cache.getPackageTree(r) + if has { + return ptree, nil } - return ex&bs.ex.f == ex + _, err = sg.require(ctx, sourceIsSetUp|sourceExistsLocally) + if err != nil { + return pkgtree.PackageTree{}, err + } + + label := fmt.Sprintf("%s:%s", pr, sg.src.upstreamURL()) + err = sg.suprvsr.do(ctx, label, ctListPackages, func(ctx context.Context) error { + ptree, err = sg.src.listPackages(ctx, pr, r) + return err + }) + if err != nil { + return pkgtree.PackageTree{}, err + } + + sg.cache.setPackageTree(r, ptree) + return ptree, nil } -// syncLocal ensures the local data we have about the source is fully up to date -// with what's out there over the network. 
-func (bs *baseVCSSource) syncLocal() error { - // Ensure we only have one goroutine doing this at a time - f := func() { - // First, ensure the local instance exists - bs.syncerr = bs.ensureCacheExistence() - if bs.syncerr != nil { - return - } +func (sg *sourceGateway) convertToRevision(ctx context.Context, v Version) (Revision, error) { + // When looking up by Version, there are four states that may have + // differing opinions about version->revision mappings: + // + // 1. The upstream source/repo (canonical) + // 2. The local source/repo + // 3. The local cache + // 4. The input (params to this method) + // + // If the input differs from any of the above, it's likely because some lock + // got written somewhere with a version/rev pair that has since changed or + // been removed. But correct operation dictates that such a mis-mapping be + // respected; if the mis-mapping is to be corrected, it has to be done + // intentionally by the caller, not automatically here. + r, has := sg.cache.toRevision(v) + if has { + return r, nil + } - _, bs.syncerr = bs.lvfunc() - if bs.syncerr != nil { - return - } + if sg.srcState&sourceHasLatestVersionList != 0 { + // We have the latest version list already and didn't get a match, so + // this is definitely a failure case. + return "", fmt.Errorf("version %q does not exist in source", v) + } - // This case is really just for git repos, where the lvfunc doesn't - // guarantee that the local repo is synced - if !bs.crepo.synced { - bs.crepo.mut.Lock() - err := bs.crepo.r.Update() - if err != nil { - bs.syncerr = fmt.Errorf("failed fetching latest updates with err: %s", unwrapVcsErr(err)) - } else { - bs.crepo.synced = true - } - bs.crepo.mut.Unlock() - } + // The version list is out of date; it's possible this version might + // show up after loading it. 
+ _, err := sg.require(ctx, sourceIsSetUp|sourceHasLatestVersionList) + if err != nil { + return "", err + } + + r, has = sg.cache.toRevision(v) + if !has { + return "", fmt.Errorf("version %q does not exist in source", v) } - bs.synconce.Do(f) - return bs.syncerr + return r, nil } -func (bs *baseVCSSource) listPackages(pr ProjectRoot, v Version) (ptree pkgtree.PackageTree, err error) { - if err = bs.ensureCacheExistence(); err != nil { - return +func (sg *sourceGateway) listVersions(ctx context.Context) ([]PairedVersion, error) { + sg.mu.Lock() + defer sg.mu.Unlock() + + // TODO(sdboyer) The problem here is that sourceExistsUpstream may not be + // sufficient (e.g. bzr, hg), but we don't want to force local b/c git + // doesn't need it + _, err := sg.require(ctx, sourceIsSetUp|sourceExistsUpstream|sourceHasLatestVersionList) + if err != nil { + return nil, err } - var r Revision - if r, err = bs.toRevOrErr(v); err != nil { - return + return sg.cache.getAllVersions(), nil +} + +func (sg *sourceGateway) revisionPresentIn(ctx context.Context, r Revision) (bool, error) { + sg.mu.Lock() + defer sg.mu.Unlock() + + _, err := sg.require(ctx, sourceIsSetUp|sourceExistsLocally) + if err != nil { + return false, err } - // Return the ptree from the cache, if we already have it - var exists bool - if ptree, exists = bs.dc.ptrees[r]; exists { - return + if _, exists := sg.cache.getVersionsFor(r); exists { + return true, nil } - // Not in the cache; check out the version and do the analysis - bs.crepo.mut.Lock() - // Check out the desired version for analysis - if r != "" { - // Always prefer a rev, if it's available - err = bs.crepo.r.UpdateVersion(string(r)) - } else { - // If we don't have a rev, ensure the repo is up to date, otherwise we - // could have a desync issue - if !bs.crepo.synced { - err = bs.crepo.r.Update() - if err != nil { - err = fmt.Errorf("could not fetch latest updates into repository: %s", unwrapVcsErr(err)) - return - } - bs.crepo.synced = true - } - 
err = bs.crepo.r.UpdateVersion(v.String()) + present, err := sg.src.revisionPresentIn(r) + if err == nil && present { + sg.cache.markRevisionExists(r) } + return present, err +} - if err == nil { - ptree, err = pkgtree.ListPackages(bs.crepo.r.LocalPath(), string(pr)) - // TODO(sdboyer) cache errs? - if err == nil { - bs.dc.ptrees[r] = ptree - } - } else { - err = unwrapVcsErr(err) +func (sg *sourceGateway) sourceURL(ctx context.Context) (string, error) { + sg.mu.Lock() + defer sg.mu.Unlock() + + _, err := sg.require(ctx, sourceIsSetUp) + if err != nil { + return "", err } - bs.crepo.mut.Unlock() - return + return sg.src.upstreamURL(), nil } -// toRevOrErr makes all efforts to convert a Version into a rev, including -// updating the cache repo (if needed). It does not guarantee that the returned -// Revision actually exists in the repository (as one of the cheaper methods may -// have had bad data). -func (bs *baseVCSSource) toRevOrErr(v Version) (r Revision, err error) { - r = bs.dc.toRevision(v) - if r == "" { - // Rev can be empty if: - // - The cache is unsynced - // - A version was passed that used to exist, but no longer does - // - A garbage version was passed. (Functionally indistinguishable from - // the previous) - if !bs.cvsync { - // call the lvfunc to sync the meta cache - _, err = bs.lvfunc() +// createSingleSourceCache creates a singleSourceCache instance for use by +// the encapsulated source. +func (sg *sourceGateway) createSingleSourceCache() singleSourceCache { + // TODO(sdboyer) when persistent caching is ready, just drop in the creation + // of a source-specific handle here + return newMemoryCache() +} + +func (sg *sourceGateway) require(ctx context.Context, wanted sourceState) (errState sourceState, err error) { + todo := (^sg.srcState) & wanted + var flag sourceState = 1 + + for todo != 0 { + if todo&flag != 0 { + // Assign the currently visited bit to errState so that we can + // return easily later. 
+				//
+				// Also set up addlState so that individual ops can easily attach
+				// more states that were incidentally satisfied by the op.
+				errState = flag
+				var addlState sourceState
+
+				switch flag {
+				case sourceIsSetUp:
+					sg.src, addlState, err = sg.maybe.try(ctx, sg.cachedir, sg.cache, sg.suprvsr)
+				case sourceExistsUpstream:
+					err = sg.suprvsr.do(ctx, sg.src.sourceType(), ctSourcePing, func(ctx context.Context) error {
+						if !sg.src.existsUpstream(ctx) {
+							return fmt.Errorf("%s does not exist upstream", sg.src.upstreamURL())
+						}
+						return nil
+					})
+				case sourceExistsLocally:
+					if !sg.src.existsLocally(ctx) {
+						err = sg.suprvsr.do(ctx, sg.src.sourceType(), ctSourceInit, func(ctx context.Context) error {
+							return sg.src.initLocal(ctx)
+						})
+
+						if err == nil {
+							addlState |= sourceHasLatestLocally
+						} else {
+							err = fmt.Errorf("%s does not exist in the local cache and fetching failed: %s", sg.src.upstreamURL(), err)
+						}
+					}
+				case sourceHasLatestVersionList:
+					var pvl []PairedVersion
+					err = sg.suprvsr.do(ctx, sg.src.sourceType(), ctListVersions, func(ctx context.Context) error {
+						pvl, err = sg.src.listVersions(ctx)
+						return err
+					})
+
+					if err == nil {
+						sg.cache.storeVersionMap(pvl, true)
+					}
+				case sourceHasLatestLocally:
+					err = sg.suprvsr.do(ctx, sg.src.sourceType(), ctSourceFetch, func(ctx context.Context) error {
+						return sg.src.updateLocal(ctx)
+					})
+				}
+
 			if err != nil {
 				return
 			}
-		}
 
-		r = bs.dc.toRevision(v)
-		// If we still don't have a rev, then the version's no good
-		if r == "" {
-			err = fmt.Errorf("version %s does not exist in source %s", v, bs.crepo.r.Remote())
+			checked := flag | addlState
+			sg.srcState |= checked
+			todo &= ^checked
 		}
-	}
 
-	return
-}
-
-func (bs *baseVCSSource) exportVersionTo(v Version, to string) error {
-	if err := bs.ensureCacheExistence(); err != nil {
-		return err
+		flag <<= 1
 	}
 
-	// Only make the parent dir, as the general implementation will balk on
-	// trying to write to an empty but existing dir.
- if err := os.MkdirAll(filepath.Dir(to), 0777); err != nil { - return err - } + return 0, nil +} - return bs.crepo.exportVersionTo(v, to) +// source is an abstraction around the different underlying types (git, bzr, hg, +// svn, maybe raw on-disk code, and maybe eventually a registry) that can +// provide versioned project source trees. +type source interface { + existsLocally(context.Context) bool + existsUpstream(context.Context) bool + upstreamURL() string + initLocal(context.Context) error + updateLocal(context.Context) error + listVersions(context.Context) ([]PairedVersion, error) + getManifestAndLock(context.Context, ProjectRoot, Revision, ProjectAnalyzer) (Manifest, Lock, error) + listPackages(context.Context, ProjectRoot, Revision) (pkgtree.PackageTree, error) + revisionPresentIn(Revision) (bool, error) + exportRevisionTo(context.Context, Revision, string) error + sourceType() string } diff --git a/vendor/github.com/sdboyer/gps/source_cache.go b/vendor/github.com/sdboyer/gps/source_cache.go new file mode 100644 index 0000000000..68e7d7b662 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/source_cache.go @@ -0,0 +1,219 @@ +package gps + +import ( + "fmt" + "sync" + + "github.com/sdboyer/gps/pkgtree" +) + +// singleSourceCache provides a method set for storing and retrieving data about +// a single source. +type singleSourceCache interface { + // Store the manifest and lock information for a given revision, as defined by + // a particular ProjectAnalyzer. + setManifestAndLock(Revision, ProjectAnalyzer, Manifest, Lock) + + // Get the manifest and lock information for a given revision, as defined by + // a particular ProjectAnalyzer. + getManifestAndLock(Revision, ProjectAnalyzer) (Manifest, Lock, bool) + + // Store a PackageTree for a given revision. + setPackageTree(Revision, pkgtree.PackageTree) + + // Get the PackageTree for a given revision. 
+ getPackageTree(Revision) (pkgtree.PackageTree, bool) + + // Indicate to the cache that an individual revision is known to exist. + markRevisionExists(r Revision) + + // Store the mappings between a set of PairedVersions' surface versions + // their corresponding revisions. + // + // If flush is true, the existing list of versions will be purged before + // writing. Revisions will have their pairings purged, but record of the + // revision existing will be kept, on the assumption that revisions are + // immutable and permanent. + storeVersionMap(versionList []PairedVersion, flush bool) + + // Get the list of unpaired versions corresponding to the given revision. + getVersionsFor(Revision) ([]UnpairedVersion, bool) + + // Gets all the version pairs currently known to the cache. + getAllVersions() []PairedVersion + + // Get the revision corresponding to the given unpaired version. + getRevisionFor(UnpairedVersion) (Revision, bool) + + // Attempt to convert the given Version to a Revision, given information + // currently present in the cache, and in the Version itself. + toRevision(v Version) (Revision, bool) + + // Attempt to convert the given Version to an UnpairedVersion, given + // information currently present in the cache, or in the Version itself. + // + // If the input is a revision and multiple UnpairedVersions are associated + // with it, whatever happens to be the first is returned. 
+ toUnpaired(v Version) (UnpairedVersion, bool) +} + +type singleSourceCacheMemory struct { + mut sync.RWMutex // protects all maps + infos map[ProjectAnalyzer]map[Revision]projectInfo + ptrees map[Revision]pkgtree.PackageTree + vMap map[UnpairedVersion]Revision + rMap map[Revision][]UnpairedVersion +} + +func newMemoryCache() singleSourceCache { + return &singleSourceCacheMemory{ + infos: make(map[ProjectAnalyzer]map[Revision]projectInfo), + ptrees: make(map[Revision]pkgtree.PackageTree), + vMap: make(map[UnpairedVersion]Revision), + rMap: make(map[Revision][]UnpairedVersion), + } +} + +type projectInfo struct { + Manifest + Lock +} + +func (c *singleSourceCacheMemory) setManifestAndLock(r Revision, an ProjectAnalyzer, m Manifest, l Lock) { + c.mut.Lock() + inner, has := c.infos[an] + if !has { + inner = make(map[Revision]projectInfo) + c.infos[an] = inner + } + inner[r] = projectInfo{Manifest: m, Lock: l} + + // Ensure there's at least an entry in the rMap so that the rMap always has + // a complete picture of the revisions we know to exist + if _, has = c.rMap[r]; !has { + c.rMap[r] = nil + } + c.mut.Unlock() +} + +func (c *singleSourceCacheMemory) getManifestAndLock(r Revision, an ProjectAnalyzer) (Manifest, Lock, bool) { + c.mut.Lock() + defer c.mut.Unlock() + + inner, has := c.infos[an] + if !has { + return nil, nil, false + } + + pi, has := inner[r] + if has { + return pi.Manifest, pi.Lock, true + } + return nil, nil, false +} + +func (c *singleSourceCacheMemory) setPackageTree(r Revision, ptree pkgtree.PackageTree) { + c.mut.Lock() + c.ptrees[r] = ptree + + // Ensure there's at least an entry in the rMap so that the rMap always has + // a complete picture of the revisions we know to exist + if _, has := c.rMap[r]; !has { + c.rMap[r] = nil + } + c.mut.Unlock() +} + +func (c *singleSourceCacheMemory) getPackageTree(r Revision) (pkgtree.PackageTree, bool) { + c.mut.Lock() + ptree, has := c.ptrees[r] + c.mut.Unlock() + return ptree, has +} + +func (c 
*singleSourceCacheMemory) storeVersionMap(versionList []PairedVersion, flush bool) { + c.mut.Lock() + if flush { + // TODO(sdboyer) how do we handle cache consistency here - revs that may + // be out of date vis-a-vis the ptrees or infos maps? + for r := range c.rMap { + c.rMap[r] = nil + } + + c.vMap = make(map[UnpairedVersion]Revision) + } + + for _, v := range versionList { + pv := v.(PairedVersion) + u, r := pv.Unpair(), pv.Underlying() + c.vMap[u] = r + c.rMap[r] = append(c.rMap[r], u) + } + c.mut.Unlock() +} + +func (c *singleSourceCacheMemory) markRevisionExists(r Revision) { + c.mut.Lock() + if _, has := c.rMap[r]; !has { + c.rMap[r] = nil + } + c.mut.Unlock() +} + +func (c *singleSourceCacheMemory) getVersionsFor(r Revision) ([]UnpairedVersion, bool) { + c.mut.Lock() + versionList, has := c.rMap[r] + c.mut.Unlock() + return versionList, has +} + +func (c *singleSourceCacheMemory) getAllVersions() []PairedVersion { + vlist := make([]PairedVersion, 0, len(c.vMap)) + for v, r := range c.vMap { + vlist = append(vlist, v.Is(r)) + } + return vlist +} + +func (c *singleSourceCacheMemory) getRevisionFor(uv UnpairedVersion) (Revision, bool) { + c.mut.Lock() + r, has := c.vMap[uv] + c.mut.Unlock() + return r, has +} + +func (c *singleSourceCacheMemory) toRevision(v Version) (Revision, bool) { + switch t := v.(type) { + case Revision: + return t, true + case PairedVersion: + return t.Underlying(), true + case UnpairedVersion: + c.mut.Lock() + r, has := c.vMap[t] + c.mut.Unlock() + return r, has + default: + panic(fmt.Sprintf("Unknown version type %T", v)) + } +} + +func (c *singleSourceCacheMemory) toUnpaired(v Version) (UnpairedVersion, bool) { + switch t := v.(type) { + case UnpairedVersion: + return t, true + case PairedVersion: + return t.Unpair(), true + case Revision: + c.mut.Lock() + upv, has := c.rMap[t] + c.mut.Unlock() + + if has && len(upv) > 0 { + return upv[0], true + } + return nil, false + default: + panic(fmt.Sprintf("unknown version type %T", v)) + } 
+} diff --git a/vendor/github.com/sdboyer/gps/source_manager.go b/vendor/github.com/sdboyer/gps/source_manager.go index 2c10d15861..d19f10a3db 100644 --- a/vendor/github.com/sdboyer/gps/source_manager.go +++ b/vendor/github.com/sdboyer/gps/source_manager.go @@ -1,6 +1,7 @@ package gps import ( + "context" "fmt" "os" "os/signal" @@ -11,13 +12,12 @@ import ( "sync/atomic" "time" + "github.com/sdboyer/constext" "github.com/sdboyer/gps/pkgtree" ) -// Used to compute a friendly filepath from a URL-shaped input -// -// TODO(sdboyer) this is awful. Right? -var sanitizer = strings.NewReplacer(":", "-", "/", "-", "+", "-") +// Used to compute a friendly filepath from a URL-shaped input. +var sanitizer = strings.NewReplacer("-", "--", ":", "-", "/", "-", "+", "-") // A SourceManager is responsible for retrieving, managing, and interrogating // source repositories. Its primary purpose is to serve the needs of a Solver, @@ -37,7 +37,8 @@ type SourceManager interface { // ListVersions retrieves a list of the available versions for a given // repository name. - ListVersions(ProjectIdentifier) ([]Version, error) + // TODO convert to []PairedVersion + ListVersions(ProjectIdentifier) ([]PairedVersion, error) // RevisionPresentIn indicates whether the provided Version is present in // the given repository. @@ -53,19 +54,20 @@ type SourceManager interface { // gps currently requires that projects be rooted at their repository root, // necessitating that the ProjectIdentifier's ProjectRoot must also be a // repository root. - GetManifestAndLock(ProjectIdentifier, Version) (Manifest, Lock, error) + GetManifestAndLock(ProjectIdentifier, Version, ProjectAnalyzer) (Manifest, Lock, error) // ExportProject writes out the tree of the provided import path, at the // provided version, to the provided directory. 
ExportProject(ProjectIdentifier, Version, string) error - // AnalyzerInfo reports the name and version of the logic used to service - // GetManifestAndLock(). - AnalyzerInfo() (name string, version int) - // DeduceRootProject takes an import path and deduces the corresponding // project/source root. DeduceProjectRoot(ip string) (ProjectRoot, error) + + // Release lets go of any locks held by the SourceManager. Once called, it is + // no longer safe to call methods against it; all method calls will + // immediately result in errors. + Release() } // A ProjectAnalyzer is responsible for analyzing a given path for Manifest and @@ -85,21 +87,16 @@ type ProjectAnalyzer interface { // There's no (planned) reason why it would need to be reimplemented by other // tools; control via dependency injection is intended to be sufficient. type SourceMgr struct { - cachedir string // path to root of cache dir - lf *os.File // handle for the sm lock file on disk - srcs map[string]source // map of path names to source obj - srcmut sync.RWMutex // mutex protecting srcs map - srcfuts map[string]*unifiedFuture // map of paths to source-handling futures - srcfmut sync.RWMutex // mutex protecting futures map - an ProjectAnalyzer // analyzer injected by the caller - dxt *deducerTrie // static trie with baseline source type deduction info - rootxt *prTrie // dynamic trie, updated as ProjectRoots are deduced - qch chan struct{} // quit chan for signal handler - sigmut sync.Mutex // mutex protecting signal handling setup/teardown - glock sync.RWMutex // global lock for all ops, sm validity - opcount int32 // number of ops in flight - relonce sync.Once // once-er to ensure we only release once - releasing int32 // flag indicating release of sm has begun + cachedir string // path to root of cache dir + lf *os.File // handle for the sm lock file on disk + suprvsr *supervisor // subsystem that supervises running calls/io + cancelAll context.CancelFunc // cancel func to kill all running work + 
deduceCoord *deductionCoordinator // subsystem that manages import path deduction + srcCoord *sourceCoordinator // subsystem that manages sources + sigmut sync.Mutex // mutex protecting signal handling setup/teardown + qch chan struct{} // quit chan for signal handler + relonce sync.Once // once-er to ensure we only release once + releasing int32 // flag indicating release of sm has begun } type smIsReleased struct{} @@ -108,18 +105,11 @@ func (smIsReleased) Error() string { return "this SourceMgr has been released, its methods can no longer be called" } -type unifiedFuture struct { - rc, sc chan struct{} - rootf stringFuture - srcf sourceFuture -} - var _ SourceManager = &SourceMgr{} // NewSourceManager produces an instance of gps's built-in SourceManager. It -// takes a cache directory (where local instances of upstream repositories are -// stored), and a ProjectAnalyzer that is used to extract manifest and lock -// information from source trees. +// takes a cache directory, where local instances of upstream sources are +// stored. // // The returned SourceManager aggressively caches information wherever possible. // If tools need to do preliminary work involving upstream repository analysis @@ -130,11 +120,7 @@ var _ SourceManager = &SourceMgr{} // gps's SourceManager is intended to be threadsafe (if it's not, please file a // bug!). It should be safe to reuse across concurrent solving runs, even on // unrelated projects. 
-func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) { - if an == nil { - return nil, fmt.Errorf("a ProjectAnalyzer must be provided to the SourceManager") - } - +func NewSourceManager(cachedir string) (*SourceMgr, error) { err := os.MkdirAll(filepath.Join(cachedir, "sources"), 0777) if err != nil { return nil, err @@ -157,15 +143,18 @@ func NewSourceManager(an ProjectAnalyzer, cachedir string) (*SourceMgr, error) { } } + ctx, cf := context.WithCancel(context.TODO()) + superv := newSupervisor(ctx) + deducer := newDeductionCoordinator(superv) + sm := &SourceMgr{ - cachedir: cachedir, - lf: fi, - srcs: make(map[string]source), - srcfuts: make(map[string]*unifiedFuture), - an: an, - dxt: pathDeducerTrie(), - rootxt: newProjectRootTrie(), - qch: make(chan struct{}), + cachedir: cachedir, + lf: fi, + suprvsr: superv, + cancelAll: cf, + deduceCoord: deducer, + srcCoord: newSourceCoordinator(superv, deducer, cachedir), + qch: make(chan struct{}), } return sm, nil @@ -221,7 +210,7 @@ func (sm *SourceMgr) HandleSignals(sigch chan os.Signal) { return } - opc := atomic.LoadInt32(&sm.opcount) + opc := sm.suprvsr.count() if opc > 0 { fmt.Printf("Signal received: waiting for %v ops to complete...\n", opc) } @@ -293,53 +282,36 @@ func (sm *SourceMgr) Release() { // This must be called only and exactly once. Calls to it should be wrapped in // the sm.relonce sync.Once instance. func (sm *SourceMgr) doRelease() { - // Grab the global sm lock so that we only release once we're sure all other - // calls have completed - // - // (This could deadlock, ofc) - sm.glock.Lock() + // Send the signal to the supervisor to cancel all running calls + sm.cancelAll() + sm.suprvsr.wait() - // Close the file handle for the lock file + // Close the file handle for the lock file and remove it from disk sm.lf.Close() - // Remove the lock file from disk os.Remove(filepath.Join(sm.cachedir, "sm.lock")) + // Close the qch, if non-nil, so the signal handlers run out. 
This will // also deregister the sig channel, if any has been set up. if sm.qch != nil { close(sm.qch) } - sm.glock.Unlock() -} - -// AnalyzerInfo reports the name and version of the injected ProjectAnalyzer. -func (sm *SourceMgr) AnalyzerInfo() (name string, version int) { - return sm.an.Info() } // GetManifestAndLock returns manifest and lock information for the provided -// import path. gps currently requires that projects be rooted at their -// repository root, necessitating that the ProjectIdentifier's ProjectRoot must -// also be a repository root. -// -// The work of producing the manifest and lock is delegated to the injected -// ProjectAnalyzer's DeriveManifestAndLock() method. -func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version) (Manifest, Lock, error) { +// ProjectIdentifier, at the provided Version. The work of producing the +// manifest and lock is delegated to the provided ProjectAnalyzer's +// DeriveManifestAndLock() method. +func (sm *SourceMgr) GetManifestAndLock(id ProjectIdentifier, v Version, an ProjectAnalyzer) (Manifest, Lock, error) { if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return nil, nil, smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return nil, nil, err } - return src.getManifestAndLock(id.ProjectRoot, v) + return srcg.getManifestAndLock(context.TODO(), id.ProjectRoot, v, an) } // ListPackages parses the tree of the Go packages at and below the ProjectRoot @@ -348,19 +320,13 @@ func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (pkgtree.Pack if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return pkgtree.PackageTree{}, smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - 
- src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return pkgtree.PackageTree{}, err } - return src.listPackages(id.ProjectRoot, v) + return srcg.listPackages(context.TODO(), id.ProjectRoot, v) } // ListVersions retrieves a list of the available versions for a given @@ -375,24 +341,18 @@ func (sm *SourceMgr) ListPackages(id ProjectIdentifier, v Version) (pkgtree.Pack // calls will return a cached version of the first call's results. if upstream // is not accessible (network outage, access issues, or the resource actually // went away), an error will be returned. -func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]Version, error) { +func (sm *SourceMgr) ListVersions(id ProjectIdentifier) ([]PairedVersion, error) { if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return nil, smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { // TODO(sdboyer) More-er proper-er errors return nil, err } - return src.listVersions() + return srcg.listVersions(context.TODO()) } // RevisionPresentIn indicates whether the provided Revision is present in the given @@ -401,20 +361,14 @@ func (sm *SourceMgr) RevisionPresentIn(id ProjectIdentifier, r Revision) (bool, if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return false, smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { // TODO(sdboyer) More-er proper-er errors return false, err } - return src.revisionPresentIn(r) + return srcg.revisionPresentIn(context.TODO(), r) } // SourceExists checks if a repository exists, either 
upstream or in the cache, @@ -423,19 +377,14 @@ func (sm *SourceMgr) SourceExists(id ProjectIdentifier) (bool, error) { if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return false, smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return false, err } - return src.checkExistence(existsInCache) || src.checkExistence(existsUpstream), nil + ctx := context.TODO() + return srcg.existsInCache(ctx) || srcg.existsUpstream(ctx), nil } // SyncSourceFor will ensure that all local caches and information about a @@ -446,19 +395,13 @@ func (sm *SourceMgr) SyncSourceFor(id ProjectIdentifier) error { if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return err } - return src.syncLocal() + return srcg.syncLocal(context.TODO()) } // ExportProject writes out the tree of the provided ProjectIdentifier's @@ -467,19 +410,13 @@ func (sm *SourceMgr) ExportProject(id ProjectIdentifier, v Version, to string) e if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - src, err := sm.getSourceFor(id) + + srcg, err := sm.srcCoord.getSourceGatewayFor(context.TODO(), id) if err != nil { return err } - return src.exportVersionTo(v, to) + return srcg.exportVersionTo(context.TODO(), v, to) } // DeduceProjectRoot takes an import path and deduces the corresponding @@ -493,206 +430,151 @@ func (sm *SourceMgr) DeduceProjectRoot(ip string) (ProjectRoot, 
error) { if atomic.CompareAndSwapInt32(&sm.releasing, 1, 1) { return "", smIsReleased{} } - atomic.AddInt32(&sm.opcount, 1) - sm.glock.RLock() - defer func() { - sm.glock.RUnlock() - atomic.AddInt32(&sm.opcount, -1) - }() - - if prefix, root, has := sm.rootxt.LongestPrefix(ip); has { - // The non-matching tail of the import path could still be malformed. - // Validate just that part, if it exists - if prefix != ip { - // TODO(sdboyer) commented until i find a proper description of how - // to validate an import path - //if !pathvld.MatchString(strings.TrimPrefix(ip, prefix+"/")) { - //return "", fmt.Errorf("%q is not a valid import path", ip) - //} - // There was one, and it validated fine - add it so we don't have to - // revalidate it later - sm.rootxt.Insert(ip, root) - } - return root, nil - } - ft, err := sm.deducePathAndProcess(ip) - if err != nil { - return "", err - } + pd, err := sm.deduceCoord.deduceRootPath(context.TODO(), ip) + return ProjectRoot(pd.root), err +} - r, err := ft.rootf() - return ProjectRoot(r), err +type timeCount struct { + count int + start time.Time } -func (sm *SourceMgr) getSourceFor(id ProjectIdentifier) (source, error) { - nn := id.normalizedSource() +type durCount struct { + count int + dur time.Duration +} - sm.srcmut.RLock() - src, has := sm.srcs[nn] - sm.srcmut.RUnlock() - if has { - return src, nil - } +type supervisor struct { + ctx context.Context + cancelFunc context.CancelFunc + mu sync.Mutex // Guards all maps + cond sync.Cond // Wraps mu so callers can wait until all calls end + running map[callInfo]timeCount + ran map[callType]durCount +} - ft, err := sm.deducePathAndProcess(nn) - if err != nil { - return nil, err +func newSupervisor(ctx context.Context) *supervisor { + ctx, cf := context.WithCancel(ctx) + supv := &supervisor{ + ctx: ctx, + cancelFunc: cf, + running: make(map[callInfo]timeCount), + ran: make(map[callType]durCount), } - // we don't care about the ident here, and the future produced by - // 
deducePathAndProcess will dedupe with what's in the sm.srcs map - src, _, err = ft.srcf() - return src, err + supv.cond = sync.Cond{L: &supv.mu} + return supv } -func (sm *SourceMgr) deducePathAndProcess(path string) (*unifiedFuture, error) { - // Check for an already-existing future in the map first - sm.srcfmut.RLock() - ft, exists := sm.srcfuts[path] - sm.srcfmut.RUnlock() - - if exists { - return ft, nil +// do executes the incoming closure using a conjoined context, and keeps +// counters to ensure the sourceMgr can't finish Release()ing until after all +// calls have returned. +func (sup *supervisor) do(inctx context.Context, name string, typ callType, f func(context.Context) error) error { + ci := callInfo{ + name: name, + typ: typ, } - // Don't have one - set one up. - df, err := sm.deduceFromPath(path) + octx, err := sup.start(ci) if err != nil { - return nil, err - } - - sm.srcfmut.Lock() - defer sm.srcfmut.Unlock() - // A bad interleaving could allow two goroutines to make it here for the - // same path, so we have to re-check existence. - if ft, exists = sm.srcfuts[path]; exists { - return ft, nil + return err } - ft = &unifiedFuture{ - rc: make(chan struct{}, 1), - sc: make(chan struct{}, 1), - } + cctx, cancelFunc := constext.Cons(inctx, octx) + err = f(cctx) + sup.done(ci) + cancelFunc() + return err +} - // Rewrap the rootfinding func in another future - var pr string - var rooterr error - - // Kick off the func to get root and register it into the rootxt. - rootf := func() { - defer close(ft.rc) - pr, rooterr = df.root() - if rooterr != nil { - // Don't cache errs. This doesn't really hurt the solver, and is - // beneficial for other use cases because it means we don't have to - // expose any kind of controls for clearing caches. 
- return - } +func (sup *supervisor) getLifetimeContext() context.Context { + return sup.ctx +} - tpr := ProjectRoot(pr) - sm.rootxt.Insert(pr, tpr) - // It's not harmful if the netname was a URL rather than an - // import path - if pr != path { - // Insert the result into the rootxt twice - once at the - // root itself, so as to catch siblings/relatives, and again - // at the exact provided import path (assuming they were - // different), so that on subsequent calls, exact matches - // can skip the regex above. - sm.rootxt.Insert(path, tpr) - } +func (sup *supervisor) start(ci callInfo) (context.Context, error) { + sup.mu.Lock() + defer sup.mu.Unlock() + if sup.ctx.Err() != nil { + // We've already been canceled; error out. + return nil, sup.ctx.Err() } - // If deduction tells us this is slow, do it async in its own goroutine; - // otherwise, we can do it here and give the scheduler a bit of a break. - if df.rslow { - go rootf() + if existingInfo, has := sup.running[ci]; has { + existingInfo.count++ + sup.running[ci] = existingInfo } else { - rootf() - } - - // Store a closure bound to the future result on the futTracker. - ft.rootf = func() (string, error) { - <-ft.rc - return pr, rooterr + sup.running[ci] = timeCount{ + count: 1, + start: time.Now(), + } } - // Root future is handled, now build up the source future. - // - // First, complete the partialSourceFuture with information the sm has about - // our cachedir and analyzer - fut := df.psf(sm.cachedir, sm.an) - - // The maybeSource-trying process is always slow, so keep it async here. - var src source - var ident string - var srcerr error - go func() { - defer close(ft.sc) - src, ident, srcerr = fut() - if srcerr != nil { - // Don't cache errs. This doesn't really hurt the solver, and is - // beneficial for other use cases because it means we don't have - // to expose any kind of controls for clearing caches. 
- return - } + return sup.ctx, nil +} - sm.srcmut.Lock() - defer sm.srcmut.Unlock() +func (sup *supervisor) count() int { + sup.mu.Lock() + defer sup.mu.Unlock() + return len(sup.running) +} - // Check to make sure a source hasn't shown up in the meantime, or that - // there wasn't already one at the ident. - var hasi, hasp bool - var srci, srcp source - if ident != "" { - srci, hasi = sm.srcs[ident] - } - srcp, hasp = sm.srcs[path] - - // if neither the ident nor the input path have an entry for this src, - // we're in the simple case - write them both in and we're done - if !hasi && !hasp { - sm.srcs[path] = src - if ident != path && ident != "" { - sm.srcs[ident] = src - } - return - } +func (sup *supervisor) done(ci callInfo) { + sup.mu.Lock() - // Now, the xors. - // - // If already present for ident but not for path, copy ident's src - // to path. This covers cases like a gopkg.in path referring back - // onto a github repository, where something else already explicitly - // looked up that same gh repo. - if hasi && !hasp { - sm.srcs[path] = srci - src = srci - } - // If already present for path but not for ident, do NOT copy path's - // src to ident, but use the returned one instead. Really, this case - // shouldn't occur at all...? But the crucial thing is that the - // path-based one has already discovered what actual ident of source - // they want to use, and changing that arbitrarily would have - // undefined effects. - if hasp && !hasi && ident != "" { - sm.srcs[ident] = src - } + existingInfo, has := sup.running[ci] + if !has { + panic(fmt.Sprintf("sourceMgr: tried to complete a call that had not registered via run()")) + } - // If both are present, then assume we're good, and use the path one - if hasp && hasi { - // TODO(sdboyer) compare these (somehow? reflect? pointer?) and if they're not the - // same object, panic - src = srcp + if existingInfo.count > 1 { + // If more than one is pending, don't stop the clock yet. 
+ existingInfo.count-- + sup.running[ci] = existingInfo + } else { + // Last one for this particular key; update metrics with info. + durCnt := sup.ran[ci.typ] + durCnt.count++ + durCnt.dur += time.Now().Sub(existingInfo.start) + sup.ran[ci.typ] = durCnt + delete(sup.running, ci) + + if len(sup.running) == 0 { + // This is the only place where we signal the cond, as it's the only + // time that the number of running calls could become zero. + sup.cond.Signal() } - }() + } + sup.mu.Unlock() +} - ft.srcf = func() (source, string, error) { - <-ft.sc - return src, ident, srcerr +// wait until all active calls have terminated. +// +// Assumes something else has already canceled the supervisor via its context. +func (sup *supervisor) wait() { + sup.cond.L.Lock() + for len(sup.running) > 0 { + sup.cond.Wait() } + sup.cond.L.Unlock() +} + +type callType uint + +const ( + ctHTTPMetadata callType = iota + ctListVersions + ctGetManifestAndLock + ctListPackages + ctSourcePing + ctSourceInit + ctSourceFetch + ctCheckoutVersion + ctExportTree +) - sm.srcfuts[path] = ft - return ft, nil +// callInfo provides metadata about an ongoing call. 
+type callInfo struct { + name string + typ callType } diff --git a/vendor/github.com/sdboyer/gps/source_test.go b/vendor/github.com/sdboyer/gps/source_test.go index d3c84bbf61..6aae7a3787 100644 --- a/vendor/github.com/sdboyer/gps/source_test.go +++ b/vendor/github.com/sdboyer/gps/source_test.go @@ -1,498 +1,171 @@ package gps import ( + "context" + "fmt" "io/ioutil" - "net/url" - "os/exec" "reflect" - "sync" "testing" -) - -func TestGitSourceInteractions(t *testing.T) { - // This test is slowish, skip it on -short - if testing.Short() { - t.Skip("Skipping git source version fetching test in short mode") - } - requiresBins(t, "git") - - cpath, err := ioutil.TempDir("", "smcache") - if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - } - rf := func() { - err := removeAll(cpath) - if err != nil { - t.Errorf("removeAll failed: %s", err) - } - } - n := "github.com/sdboyer/gpkt" - un := "https://" + n - u, err := url.Parse(un) - if err != nil { - t.Errorf("URL was bad, lolwut? 
errtext: %s", err) - rf() - t.FailNow() - } - mb := maybeGitSource{ - url: u, - } - - isrc, ident, err := mb.try(cpath, naiveAnalyzer{}) - if err != nil { - t.Errorf("Unexpected error while setting up gitSource for test repo: %s", err) - rf() - t.FailNow() - } - src, ok := isrc.(*gitSource) - if !ok { - t.Errorf("Expected a gitSource, got a %T", isrc) - rf() - t.FailNow() - } - if ident != un { - t.Errorf("Expected %s as source ident, got %s", un, ident) - } - - vlist, err := src.listVersions() - if err != nil { - t.Errorf("Unexpected error getting version pairs from git repo: %s", err) - rf() - t.FailNow() - } - - if src.ex.s&existsUpstream != existsUpstream { - t.Errorf("gitSource.listVersions() should have set the upstream existence bit for search") - } - if src.ex.f&existsUpstream != existsUpstream { - t.Errorf("gitSource.listVersions() should have set the upstream existence bit for found") - } - if src.ex.s&existsInCache != 0 { - t.Errorf("gitSource.listVersions() should not have set the cache existence bit for search") - } - if src.ex.f&existsInCache != 0 { - t.Errorf("gitSource.listVersions() should not have set the cache existence bit for found") - } - - // check that an expected rev is present - is, err := src.revisionPresentIn(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")) - if err != nil { - t.Errorf("Unexpected error while checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present") - } - - if len(vlist) != 7 { - t.Errorf("git test repo should've produced seven versions, got %v: vlist was %s", len(vlist), vlist) - } else { - SortForUpgrade(vlist) - evl := []Version{ - NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), - NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), - NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), - newDefaultBranch("master").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), - 
NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), - NewBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), - NewBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), - } - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) - } - } + "github.com/sdboyer/gps/pkgtree" +) - // recheck that rev is present, this time interacting with cache differently - is, err = src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")) - if err != nil { - t.Errorf("Unexpected error while re-checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present on re-check") - } -} +// Executed in parallel by TestSlowVcs +func testSourceGateway(t *testing.T) { + t.Parallel() -func TestGopkginSourceInteractions(t *testing.T) { - // This test is slowish, skip it on -short if testing.Short() { - t.Skip("Skipping gopkg.in source version fetching test in short mode") + t.Skip("Skipping gateway testing in short mode") } requiresBins(t, "git") - cpath, err := ioutil.TempDir("", "smcache") + cachedir, err := ioutil.TempDir("", "smcache") if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - } - rf := func() { - err := removeAll(cpath) - if err != nil { - t.Errorf("removeAll failed: %s", err) - } + t.Fatalf("failed to create temp dir: %s", err) } + bgc := context.Background() + ctx, cancelFunc := context.WithCancel(bgc) + defer func() { + removeAll(cachedir) + cancelFunc() + }() - tfunc := func(opath, n string, major uint64, evl []Version) { - un := "https://" + n - u, err := url.Parse(un) - if err != nil { - t.Errorf("URL was bad, lolwut? 
errtext: %s", err) - return - } - mb := maybeGopkginSource{ - opath: opath, - url: u, - major: major, - } - - isrc, ident, err := mb.try(cpath, naiveAnalyzer{}) - if err != nil { - t.Errorf("Unexpected error while setting up gopkginSource for test repo: %s", err) - return - } - src, ok := isrc.(*gopkginSource) - if !ok { - t.Errorf("Expected a gopkginSource, got a %T", isrc) - return - } - if ident != un { - t.Errorf("Expected %s as source ident, got %s", un, ident) - } - if src.major != major { - t.Errorf("Expected %v as major version filter on gopkginSource, got %v", major, src.major) - } - - // check that an expected rev is present - rev := evl[0].(PairedVersion).Underlying() - is, err := src.revisionPresentIn(rev) - if err != nil { - t.Errorf("Unexpected error while checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision %s that should exist was not present", rev) - } - - vlist, err := src.listVersions() - if err != nil { - t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) - } - - if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for search") - } - if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for found") - } + do := func(wantstate sourceState) func(t *testing.T) { + return func(t *testing.T) { + superv := newSupervisor(ctx) + sc := newSourceCoordinator(superv, newDeductionCoordinator(superv), cachedir) - if len(vlist) != len(evl) { - t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist)) - } else { - SortForUpgrade(vlist) - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + id := mkPI("github.com/sdboyer/deptest") + sg, err := sc.getSourceGatewayFor(ctx, id) + 
if err != nil { + t.Fatal(err) } - } - - // Run again, this time to ensure cache outputs correctly - vlist, err = src.listVersions() - if err != nil { - t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) - } - - if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for search") - } - if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("gopkginSource.listVersions() should have set the upstream and cache existence bits for found") - } - if len(vlist) != len(evl) { - t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist)) - } else { - SortForUpgrade(vlist) - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + if _, ok := sg.src.(*gitSource); !ok { + t.Fatalf("Expected a gitSource, got a %T", sg.src) } - } - - // recheck that rev is present, this time interacting with cache differently - is, err = src.revisionPresentIn(rev) - if err != nil { - t.Errorf("Unexpected error while re-checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present on re-check") - } - } - - // simultaneously run for v1, v2, and v3 filters of the target repo - wg := &sync.WaitGroup{} - wg.Add(3) - go func() { - tfunc("gopkg.in/sdboyer/gpkt.v1", "github.com/sdboyer/gpkt", 1, []Version{ - NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), - NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), - newDefaultBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), - NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), - }) - wg.Done() - }() - - go func() { - tfunc("gopkg.in/sdboyer/gpkt.v2", "github.com/sdboyer/gpkt", 2, []Version{ - 
NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), - }) - wg.Done() - }() - - go func() { - tfunc("gopkg.in/sdboyer/gpkt.v3", "github.com/sdboyer/gpkt", 3, []Version{ - newDefaultBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), - }) - wg.Done() - }() - - wg.Wait() - rf() -} - -func TestBzrSourceInteractions(t *testing.T) { - // This test is quite slow (ugh bzr), so skip it on -short - if testing.Short() { - t.Skip("Skipping bzr source version fetching test in short mode") - } - requiresBins(t, "bzr") - - cpath, err := ioutil.TempDir("", "smcache") - if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - } - rf := func() { - err := removeAll(cpath) - if err != nil { - t.Errorf("removeAll failed: %s", err) - } - } - - n := "launchpad.net/govcstestbzrrepo" - un := "https://" + n - u, err := url.Parse(un) - if err != nil { - t.Errorf("URL was bad, lolwut? errtext: %s", err) - rf() - t.FailNow() - } - mb := maybeBzrSource{ - url: u, - } - - isrc, ident, err := mb.try(cpath, naiveAnalyzer{}) - if err != nil { - t.Errorf("Unexpected error while setting up bzrSource for test repo: %s", err) - rf() - t.FailNow() - } - src, ok := isrc.(*bzrSource) - if !ok { - t.Errorf("Expected a bzrSource, got a %T", isrc) - rf() - t.FailNow() - } - if ident != un { - t.Errorf("Expected %s as source ident, got %s", un, ident) - } - evl := []Version{ - NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")), - newDefaultBranch("(default)").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")), - } - - // check that an expected rev is present - is, err := src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) - if err != nil { - t.Errorf("Unexpected error while checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present") - } - vlist, err := src.listVersions() - if err != nil { - 
t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err) - } - - if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for search") - } - if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for found") - } - - if len(vlist) != 2 { - t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist)) - } else { - SortForUpgrade(vlist) - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) - } - } - - // Run again, this time to ensure cache outputs correctly - vlist, err = src.listVersions() - if err != nil { - t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err) - } + if sg.srcState != wantstate { + t.Fatalf("expected state on initial create to be %v, got %v", wantstate, sg.srcState) + } - if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for search") - } - if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("bzrSource.listVersions() should have set the upstream and cache existence bits for found") - } + if err := sg.syncLocal(ctx); err != nil { + t.Fatalf("error on cloning git repo: %s", err) + } - if len(vlist) != 2 { - t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist)) - } else { - SortForUpgrade(vlist) - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) - } - } + cvlist := sg.cache.getAllVersions() + if len(cvlist) != 4 { + t.Fatalf("repo setup should've cached four versions, got %v: %s", len(cvlist), cvlist) + } - // recheck that rev is present, this time 
interacting with cache differently - is, err = src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) - if err != nil { - t.Errorf("Unexpected error while re-checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present on re-check") - } -} + wanturl := "https://" + id.normalizedSource() + goturl, err := sg.sourceURL(ctx) + if err != nil { + t.Fatalf("got err from sourceURL: %s", err) + } + if wanturl != goturl { + t.Fatalf("Expected %s as source URL, got %s", wanturl, goturl) + } -func TestHgSourceInteractions(t *testing.T) { - // This test is slow, so skip it on -short - if testing.Short() { - t.Skip("Skipping hg source version fetching test in short mode") - } - requiresBins(t, "hg") + vlist, err := sg.listVersions(ctx) + if err != nil { + t.Fatalf("Unexpected error getting version pairs from git repo: %s", err) + } - cpath, err := ioutil.TempDir("", "smcache") - if err != nil { - t.Errorf("Failed to create temp dir: %s", err) - } - rf := func() { - err := removeAll(cpath) - if err != nil { - t.Errorf("removeAll failed: %s", err) - } - } + if len(vlist) != 4 { + t.Fatalf("git test repo should've produced four versions, got %v: vlist was %s", len(vlist), vlist) + } else { + SortPairedForUpgrade(vlist) + evl := []PairedVersion{ + NewVersion("v1.0.0").Is(Revision("ff2948a2ac8f538c4ecd55962e919d1e13e74baf")), + NewVersion("v0.8.1").Is(Revision("3f4c3bea144e112a69bbe5d8d01c1b09a544253f")), + NewVersion("v0.8.0").Is(Revision("ff2948a2ac8f538c4ecd55962e919d1e13e74baf")), + newDefaultBranch("master").Is(Revision("3f4c3bea144e112a69bbe5d8d01c1b09a544253f")), + } + if !reflect.DeepEqual(vlist, evl) { + t.Fatalf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } - tfunc := func(n string, evl []Version) { - un := "https://" + n - u, err := url.Parse(un) - if err != nil { - t.Errorf("URL was bad, lolwut? 
errtext: %s", err) - return - } - mb := maybeHgSource{ - url: u, - } + rev := Revision("c575196502940c07bf89fd6d95e83b999162e051") + // check that an expected rev is not in cache + _, has := sg.cache.getVersionsFor(rev) + if has { + t.Fatal("shouldn't have bare revs in cache without specifically requesting them") + } - isrc, ident, err := mb.try(cpath, naiveAnalyzer{}) - if err != nil { - t.Errorf("Unexpected error while setting up hgSource for test repo: %s", err) - return - } - src, ok := isrc.(*hgSource) - if !ok { - t.Errorf("Expected a hgSource, got a %T", isrc) - return - } - if ident != un { - t.Errorf("Expected %s as source ident, got %s", un, ident) - } + is, err := sg.revisionPresentIn(ctx, Revision("c575196502940c07bf89fd6d95e83b999162e051")) + if err != nil { + t.Fatalf("unexpected error while checking revision presence: %s", err) + } else if !is { + t.Fatalf("revision that should exist was not present") + } - // check that an expected rev is present - is, err := src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9")) - if err != nil { - t.Errorf("Unexpected error while checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present") - } + // check that an expected rev is not in cache + _, has = sg.cache.getVersionsFor(rev) + if !has { + t.Fatal("bare rev should be in cache after specific request for it") + } - vlist, err := src.listVersions() - if err != nil { - t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) - } + // Ensure that a bad rev doesn't work on any method that takes + // versions + badver := NewVersion("notexist") + wanterr := fmt.Errorf("version %q does not exist in source", badver) - if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for search") - } - if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache { - 
t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for found") - } + _, _, err = sg.getManifestAndLock(ctx, ProjectRoot("github.com/sdboyer/deptest"), badver, naiveAnalyzer{}) + if err == nil { + t.Fatal("wanted err on nonexistent version") + } else if err.Error() != wanterr.Error() { + t.Fatalf("wanted nonexistent err when passing bad version, got: %s", err) + } - if len(vlist) != len(evl) { - t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist)) - } else { - SortForUpgrade(vlist) - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + _, err = sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), badver) + if err == nil { + t.Fatal("wanted err on nonexistent version") + } else if err.Error() != wanterr.Error() { + t.Fatalf("wanted nonexistent err when passing bad version, got: %s", err) } - } - // Run again, this time to ensure cache outputs correctly - vlist, err = src.listVersions() - if err != nil { - t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) - } + err = sg.exportVersionTo(ctx, badver, cachedir) + if err == nil { + t.Fatal("wanted err on nonexistent version") + } else if err.Error() != wanterr.Error() { + t.Fatalf("wanted nonexistent err when passing bad version, got: %s", err) + } - if src.ex.s&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for search") - } - if src.ex.f&existsUpstream|existsInCache != existsUpstream|existsInCache { - t.Errorf("hgSource.listVersions() should have set the upstream and cache existence bits for found") - } + wantptree := pkgtree.PackageTree{ + ImportRoot: "github.com/sdboyer/deptest", + Packages: map[string]pkgtree.PackageOrErr{ + "github.com/sdboyer/deptest": pkgtree.PackageOrErr{ + P: pkgtree.Package{ + ImportPath: 
"github.com/sdboyer/deptest", + Name: "deptest", + Imports: []string{}, + }, + }, + }, + } - if len(vlist) != len(evl) { - t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist)) - } else { - SortForUpgrade(vlist) - if !reflect.DeepEqual(vlist, evl) { - t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + ptree, err := sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), Revision("ff2948a2ac8f538c4ecd55962e919d1e13e74baf")) + if err != nil { + t.Fatalf("unexpected err when getting package tree with known rev: %s", err) + } + if !reflect.DeepEqual(wantptree, ptree) { + t.Fatalf("got incorrect PackageTree:\n\t(GOT): %#v\n\t(WNT): %#v", wantptree, ptree) } - } - // recheck that rev is present, this time interacting with cache differently - is, err = src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9")) - if err != nil { - t.Errorf("Unexpected error while re-checking revision presence: %s", err) - } else if !is { - t.Errorf("Revision that should exist was not present on re-check") + ptree, err = sg.listPackages(ctx, ProjectRoot("github.com/sdboyer/deptest"), NewVersion("v1.0.0")) + if err != nil { + t.Fatalf("unexpected err when getting package tree with unpaired good version: %s", err) + } + if !reflect.DeepEqual(wantptree, ptree) { + t.Fatalf("got incorrect PackageTree:\n\t(GOT): %#v\n\t(WNT): %#v", wantptree, ptree) + } } } - // simultaneously run for both the repo with and without the magic bookmark - donech := make(chan struct{}) - go func() { - tfunc("bitbucket.org/sdboyer/withbm", []Version{ - NewVersion("v1.0.0").Is(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), - newDefaultBranch("@").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), - NewBranch("another").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), - NewBranch("default").Is(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")), - 
NewBranch("newbranch").Is(Revision("5e2a01be9aee942098e44590ae545c7143da9675")), - }) - close(donech) - }() - - tfunc("bitbucket.org/sdboyer/nobm", []Version{ - NewVersion("v1.0.0").Is(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), - newDefaultBranch("default").Is(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")), - NewBranch("another").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), - NewBranch("newbranch").Is(Revision("5e2a01be9aee942098e44590ae545c7143da9675")), - }) - - <-donech - rf() -} - -// Fail a test if the specified binaries aren't installed. -func requiresBins(t *testing.T, bins ...string) { - for _, b := range bins { - _, err := exec.LookPath(b) - if err != nil { - t.Fatalf("%s is not installed", b) - } - } + // Run test twice so that we cover both the existing and non-existing case; + // only difference in results is the initial setup state. + t.Run("empty", do(sourceIsSetUp|sourceExistsUpstream|sourceHasLatestVersionList)) + t.Run("exists", do(sourceIsSetUp|sourceExistsLocally|sourceExistsUpstream|sourceHasLatestVersionList)) } diff --git a/vendor/github.com/sdboyer/gps/strip_vendor.go b/vendor/github.com/sdboyer/gps/strip_vendor.go new file mode 100644 index 0000000000..1814e9f95a --- /dev/null +++ b/vendor/github.com/sdboyer/gps/strip_vendor.go @@ -0,0 +1,26 @@ +//+build !windows + +package gps + +import "os" + +func stripVendor(path string, info os.FileInfo, err error) error { + if info.Name() == "vendor" { + if _, err := os.Lstat(path); err == nil { + if (info.Mode() & os.ModeSymlink) != 0 { + realInfo, err := os.Stat(path) + if err != nil { + return err + } + if realInfo.IsDir() { + return os.Remove(path) + } + } + if info.IsDir() { + return removeAll(path) + } + } + } + + return nil +} diff --git a/vendor/github.com/sdboyer/gps/strip_vendor_nonwindows_test.go b/vendor/github.com/sdboyer/gps/strip_vendor_nonwindows_test.go new file mode 100644 index 0000000000..36c4478156 --- /dev/null +++ 
b/vendor/github.com/sdboyer/gps/strip_vendor_nonwindows_test.go @@ -0,0 +1,142 @@ +// +build !windows + +package gps + +import "testing" + +func TestStripVendorSymlinks(t *testing.T) { + t.Run("vendor symlink", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "vendor"}, + to: "_vendor", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + }, + })) + + t.Run("nonvendor symlink", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link"}, + to: "_vendor", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link"}, + to: "_vendor", + }, + }, + }, + })) + + t.Run("vendor symlink to file", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + files: []fsPath{ + fsPath{"file"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor"}, + to: "file", + }, + }, + }, + after: filesystemState{ + files: []fsPath{ + fsPath{"file"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor"}, + to: "file", + }, + }, + }, + })) + + t.Run("chained symlinks", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor"}, + to: "vendor2", + }, + fsLink{ + path: fsPath{"vendor2"}, + to: "_vendor", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor2"}, + to: "_vendor", + }, + }, + }, + })) + + t.Run("circular symlinks", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + links: 
[]fsLink{ + fsLink{ + path: fsPath{"package", "link1"}, + to: "link2", + }, + fsLink{ + path: fsPath{"package", "link2"}, + to: "link1", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link1"}, + to: "link2", + }, + fsLink{ + path: fsPath{"package", "link2"}, + to: "link1", + }, + }, + }, + })) +} diff --git a/vendor/github.com/sdboyer/gps/strip_vendor_test.go b/vendor/github.com/sdboyer/gps/strip_vendor_test.go new file mode 100644 index 0000000000..273f386c3b --- /dev/null +++ b/vendor/github.com/sdboyer/gps/strip_vendor_test.go @@ -0,0 +1,67 @@ +package gps + +import ( + "io/ioutil" + "os" + "path/filepath" + "testing" +) + +func stripVendorTestCase(tc fsTestCase) func(*testing.T) { + return func(t *testing.T) { + tempDir, err := ioutil.TempDir("", "TestStripVendor") + if err != nil { + t.Fatalf("ioutil.TempDir err=%q", err) + } + defer func() { + if err := os.RemoveAll(tempDir); err != nil { + t.Errorf("os.RemoveAll(%q) err=%q", tempDir, err) + } + }() + tc.before.root = tempDir + tc.after.root = tempDir + + tc.before.setup(t) + + if err := filepath.Walk(tempDir, stripVendor); err != nil { + t.Errorf("filepath.Walk err=%q", err) + } + + tc.after.assert(t) + } +} + +func TestStripVendorDirectory(t *testing.T) { + t.Run("vendor directory", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "vendor"}, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + }, + })) + + t.Run("vendor file", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + files: []fsPath{ + fsPath{"package", "vendor"}, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + files: []fsPath{ + fsPath{"package", "vendor"}, + }, + }, + })) +} diff --git a/vendor/github.com/sdboyer/gps/strip_vendor_windows.go 
b/vendor/github.com/sdboyer/gps/strip_vendor_windows.go new file mode 100644 index 0000000000..147fde43a0 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/strip_vendor_windows.go @@ -0,0 +1,41 @@ +package gps + +import ( + "os" + "path/filepath" +) + +func stripVendor(path string, info os.FileInfo, err error) error { + if info.Name() == "vendor" { + if _, err := os.Lstat(path); err == nil { + symlink := (info.Mode() & os.ModeSymlink) != 0 + dir := info.IsDir() + + switch { + case symlink && dir: + // This could be a windows junction directory. Support for these in the + // standard library is spotty, and we could easily delete an important + // folder if we called os.Remove or os.RemoveAll. Just skip these. + // + // TODO: If we could distinguish between junctions and Windows symlinks, + // we might be able to safely delete symlinks, even though junctions are + // dangerous. + return filepath.SkipDir + + case symlink: + realInfo, err := os.Stat(path) + if err != nil { + return err + } + if realInfo.IsDir() { + return os.Remove(path) + } + + case dir: + return removeAll(path) + } + } + } + + return nil +} diff --git a/vendor/github.com/sdboyer/gps/strip_vendor_windows_test.go b/vendor/github.com/sdboyer/gps/strip_vendor_windows_test.go new file mode 100644 index 0000000000..2a01b627b9 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/strip_vendor_windows_test.go @@ -0,0 +1,154 @@ +// +build windows + +package gps + +import "testing" + +func TestStripVendorSymlinks(t *testing.T) { + // On windows, we skip symlinks, even if they're named 'vendor', because + // they're too hard to distinguish from junctions. 
+ t.Run("vendor symlink", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "vendor"}, + to: "_vendor", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "vendor"}, + to: "_vendor", + }, + }, + }, + })) + + t.Run("nonvendor symlink", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link"}, + to: "_vendor", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + fsPath{"package", "_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link"}, + to: "_vendor", + }, + }, + }, + })) + + t.Run("vendor symlink to file", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + files: []fsPath{ + fsPath{"file"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor"}, + to: "file", + }, + }, + }, + after: filesystemState{ + files: []fsPath{ + fsPath{"file"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor"}, + to: "file", + }, + }, + }, + })) + + t.Run("chained symlinks", stripVendorTestCase(fsTestCase{ + // Curiously, if a symlink on windows points to *another* symlink which + // eventually points at a directory, we'll correctly remove that first + // symlink, because the first symlink doesn't appear to Go to be a + // directory. 
+ before: filesystemState{ + dirs: []fsPath{ + fsPath{"_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor"}, + to: "vendor2", + }, + fsLink{ + path: fsPath{"vendor2"}, + to: "_vendor", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"_vendor"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"vendor2"}, + to: "_vendor", + }, + }, + }, + })) + + t.Run("circular symlinks", stripVendorTestCase(fsTestCase{ + before: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link1"}, + to: "link2", + }, + fsLink{ + path: fsPath{"package", "link2"}, + to: "link1", + }, + }, + }, + after: filesystemState{ + dirs: []fsPath{ + fsPath{"package"}, + }, + links: []fsLink{ + fsLink{ + path: fsPath{"package", "link1"}, + to: "link2", + }, + fsLink{ + path: fsPath{"package", "link2"}, + to: "link1", + }, + }, + }, + })) +} diff --git a/vendor/github.com/sdboyer/gps/typed_radix.go b/vendor/github.com/sdboyer/gps/typed_radix.go index cf34e987ab..73d1ae827f 100644 --- a/vendor/github.com/sdboyer/gps/typed_radix.go +++ b/vendor/github.com/sdboyer/gps/typed_radix.go @@ -87,78 +87,6 @@ func (t *deducerTrie) ToMap() map[string]pathDeducer { return m } -type prTrie struct { - sync.RWMutex - t *radix.Tree -} - -func newProjectRootTrie() *prTrie { - return &prTrie{ - t: radix.New(), - } -} - -// Delete is used to delete a key, returning the previous value and if it was deleted -func (t *prTrie) Delete(s string) (ProjectRoot, bool) { - t.Lock() - defer t.Unlock() - if pr, had := t.t.Delete(s); had { - return pr.(ProjectRoot), had - } - return "", false -} - -// Get is used to lookup a specific key, returning the value and if it was found -func (t *prTrie) Get(s string) (ProjectRoot, bool) { - t.RLock() - defer t.RUnlock() - if pr, has := t.t.Get(s); has { - return pr.(ProjectRoot), has - } - return "", false -} - -// Insert is used to add a newentry or update an existing 
entry. Returns if updated. -func (t *prTrie) Insert(s string, pr ProjectRoot) (ProjectRoot, bool) { - t.Lock() - defer t.Unlock() - if pr2, had := t.t.Insert(s, pr); had { - return pr2.(ProjectRoot), had - } - return "", false -} - -// Len is used to return the number of elements in the tree -func (t *prTrie) Len() int { - t.RLock() - defer t.RUnlock() - return t.t.Len() -} - -// LongestPrefix is like Get, but instead of an exact match, it will return the -// longest prefix match. -func (t *prTrie) LongestPrefix(s string) (string, ProjectRoot, bool) { - t.RLock() - defer t.RUnlock() - if p, pr, has := t.t.LongestPrefix(s); has && isPathPrefixOrEqual(p, s) { - return p, pr.(ProjectRoot), has - } - return "", "", false -} - -// ToMap is used to walk the tree and convert it to a map. -func (t *prTrie) ToMap() map[string]ProjectRoot { - t.RLock() - m := make(map[string]ProjectRoot) - t.t.Walk(func(s string, pr interface{}) bool { - m[s] = pr.(ProjectRoot) - return false - }) - - t.RUnlock() - return m -} - // isPathPrefixOrEqual is an additional helper check to ensure that the literal // string prefix returned from a radix tree prefix match is also a path tree // match. 
diff --git a/vendor/github.com/sdboyer/gps/vcs_repo.go b/vendor/github.com/sdboyer/gps/vcs_repo.go index d2e992a49e..a3e3cdcb14 100644 --- a/vendor/github.com/sdboyer/gps/vcs_repo.go +++ b/vendor/github.com/sdboyer/gps/vcs_repo.go @@ -1,9 +1,8 @@ package gps import ( - "bytes" + "context" "encoding/xml" - "io/ioutil" "os" "path/filepath" "runtime" @@ -13,6 +12,14 @@ import ( "github.com/Masterminds/vcs" ) +type ctxRepo interface { + vcs.Repo + get(context.Context) error + fetch(context.Context) error + updateVersion(context.Context, string) error + //ping(context.Context) (bool, error) +} + // original implementation of these methods come from // https://github.com/Masterminds/vcs @@ -20,150 +27,106 @@ type gitRepo struct { *vcs.GitRepo } -func (r *gitRepo) Get() error { - out, err := runFromCwd("git", "clone", "--recursive", r.Remote(), r.LocalPath()) - - // There are some windows cases where Git cannot create the parent directory, - // if it does not already exist, to the location it's trying to create the - // repo. Catch that error and try to handle it. - if err != nil && r.isUnableToCreateDir(err) { - basePath := filepath.Dir(filepath.FromSlash(r.LocalPath())) - if _, err := os.Stat(basePath); os.IsNotExist(err) { - err = os.MkdirAll(basePath, 0755) - if err != nil { - return vcs.NewLocalError("unable to create directory", err, "") - } - - out, err = runFromCwd("git", "clone", r.Remote(), r.LocalPath()) - if err != nil { - return vcs.NewRemoteError("unable to get repository", err, string(out)) - } - return err - } - } else if err != nil { - return vcs.NewRemoteError("unable to get repository", err, string(out)) +func newVcsRemoteErrorOr(msg string, err error, out string) error { + if err == context.Canceled || err == context.DeadlineExceeded { + return err } - - return nil + return vcs.NewRemoteError(msg, err, out) } -func (r *gitRepo) Update() error { - // Perform a fetch to make sure everything is up to date. 
- out, err := runFromRepoDir(r, "git", "fetch", "--tags", r.RemoteLocation) - if err != nil { - return vcs.NewRemoteError("unable to update repository", err, string(out)) +func newVcsLocalErrorOr(msg string, err error, out string) error { + if err == context.Canceled || err == context.DeadlineExceeded { + return err } + return vcs.NewLocalError(msg, err, out) +} - // When in a detached head state, such as when an individual commit is checked - // out do not attempt a pull. It will cause an error. - detached, err := r.isDetachedHead() +func (r *gitRepo) get(ctx context.Context) error { + out, err := runFromCwd(ctx, "git", "clone", "--recursive", r.Remote(), r.LocalPath()) if err != nil { - return vcs.NewLocalError("unable to update repository", err, "") + return newVcsRemoteErrorOr("unable to get repository", err, string(out)) } - if detached { - return nil + return nil +} + +func (r *gitRepo) fetch(ctx context.Context) error { + // Perform a fetch to make sure everything is up to date. + out, err := runFromRepoDir(ctx, r, "git", "fetch", "--tags", "--prune", r.RemoteLocation) + if err != nil { + return newVcsRemoteErrorOr("unable to update repository", err, string(out)) } + return nil +} - out, err = runFromRepoDir(r, "git", "pull") +func (r *gitRepo) updateVersion(ctx context.Context, v string) error { + out, err := runFromRepoDir(ctx, r, "git", "checkout", v) if err != nil { - return vcs.NewRemoteError("unable to update repository", err, string(out)) + return newVcsLocalErrorOr("Unable to update checked out version", err, string(out)) } - return r.defendAgainstSubmodules() + return r.defendAgainstSubmodules(ctx) } // defendAgainstSubmodules tries to keep repo state sane in the event of // submodules. Or nested submodules. What a great idea, submodules. -func (r *gitRepo) defendAgainstSubmodules() error { +func (r *gitRepo) defendAgainstSubmodules(ctx context.Context) error { // First, update them to whatever they should be, if there should happen to be any. 
- out, err := runFromRepoDir(r, "git", "submodule", "update", "--init", "--recursive") + out, err := runFromRepoDir(ctx, r, "git", "submodule", "update", "--init", "--recursive") if err != nil { - return vcs.NewLocalError("unexpected error while defensively updating submodules", err, string(out)) + return newVcsLocalErrorOr("unexpected error while defensively updating submodules", err, string(out)) } // Now, do a special extra-aggressive clean in case changing versions caused // one or more submodules to go away. - out, err = runFromRepoDir(r, "git", "clean", "-x", "-d", "-f", "-f") + out, err = runFromRepoDir(ctx, r, "git", "clean", "-x", "-d", "-f", "-f") if err != nil { - return vcs.NewLocalError("unexpected error while defensively cleaning up after possible derelict submodule directories", err, string(out)) + return newVcsLocalErrorOr("unexpected error while defensively cleaning up after possible derelict submodule directories", err, string(out)) } // Then, repeat just in case there are any nested submodules that went away. - out, err = runFromRepoDir(r, "git", "submodule", "foreach", "--recursive", "git", "clean", "-x", "-d", "-f", "-f") + out, err = runFromRepoDir(ctx, r, "git", "submodule", "foreach", "--recursive", "git", "clean", "-x", "-d", "-f", "-f") if err != nil { - return vcs.NewLocalError("unexpected error while defensively cleaning up after possible derelict nested submodule directories", err, string(out)) + return newVcsLocalErrorOr("unexpected error while defensively cleaning up after possible derelict nested submodule directories", err, string(out)) } return nil } -// isUnableToCreateDir checks for an error in the command to see if an error -// where the parent directory of the VCS local path doesn't exist. This is -// done in a multi-lingual manner. 
-func (r *gitRepo) isUnableToCreateDir(err error) bool { - msg := err.Error() - if strings.HasPrefix(msg, "could not create work tree dir") || - strings.HasPrefix(msg, "不能创建工作区目录") || - strings.HasPrefix(msg, "no s'ha pogut crear el directori d'arbre de treball") || - strings.HasPrefix(msg, "impossible de créer le répertoire de la copie de travail") || - strings.HasPrefix(msg, "kunde inte skapa arbetskatalogen") || - (strings.HasPrefix(msg, "Konnte Arbeitsverzeichnis") && strings.Contains(msg, "nicht erstellen")) || - (strings.HasPrefix(msg, "작업 디렉터리를") && strings.Contains(msg, "만들 수 없습니다")) { - return true - } - - return false -} - -// isDetachedHead will detect if git repo is in "detached head" state. -func (r *gitRepo) isDetachedHead() (bool, error) { - p := filepath.Join(r.LocalPath(), ".git", "HEAD") - contents, err := ioutil.ReadFile(p) - if err != nil { - return false, err - } - - contents = bytes.TrimSpace(contents) - if bytes.HasPrefix(contents, []byte("ref: ")) { - return false, nil - } - - return true, nil -} - type bzrRepo struct { *vcs.BzrRepo } -func (r *bzrRepo) Get() error { +func (r *bzrRepo) get(ctx context.Context) error { basePath := filepath.Dir(filepath.FromSlash(r.LocalPath())) if _, err := os.Stat(basePath); os.IsNotExist(err) { err = os.MkdirAll(basePath, 0755) if err != nil { - return vcs.NewLocalError("unable to create directory", err, "") + return newVcsLocalErrorOr("unable to create directory", err, "") } } - out, err := runFromCwd("bzr", "branch", r.Remote(), r.LocalPath()) + out, err := runFromCwd(ctx, "bzr", "branch", r.Remote(), r.LocalPath()) if err != nil { - return vcs.NewRemoteError("unable to get repository", err, string(out)) + return newVcsRemoteErrorOr("unable to get repository", err, string(out)) } return nil } -func (r *bzrRepo) Update() error { - out, err := runFromRepoDir(r, "bzr", "pull") +func (r *bzrRepo) fetch(ctx context.Context) error { + out, err := runFromRepoDir(ctx, r, "bzr", "pull") if err != nil { - return 
vcs.NewRemoteError("unable to update repository", err, string(out)) + return newVcsRemoteErrorOr("unable to update repository", err, string(out)) } + return nil +} - out, err = runFromRepoDir(r, "bzr", "update") +func (r *bzrRepo) updateVersion(ctx context.Context, version string) error { + out, err := runFromRepoDir(ctx, r, "bzr", "update", "-r", version) if err != nil { - return vcs.NewRemoteError("unable to update repository", err, string(out)) + return newVcsLocalErrorOr("unable to update checked out version", err, string(out)) } - return nil } @@ -171,33 +134,27 @@ type hgRepo struct { *vcs.HgRepo } -func (r *hgRepo) Get() error { - out, err := runFromCwd("hg", "clone", r.Remote(), r.LocalPath()) +func (r *hgRepo) get(ctx context.Context) error { + out, err := runFromCwd(ctx, "hg", "clone", r.Remote(), r.LocalPath()) if err != nil { - return vcs.NewRemoteError("unable to get repository", err, string(out)) + return newVcsRemoteErrorOr("unable to get repository", err, string(out)) } return nil } -func (r *hgRepo) Update() error { - return r.UpdateVersion(``) -} - -func (r *hgRepo) UpdateVersion(version string) error { - out, err := runFromRepoDir(r, "hg", "pull") +func (r *hgRepo) fetch(ctx context.Context) error { + out, err := runFromRepoDir(ctx, r, "hg", "pull") if err != nil { - return vcs.NewRemoteError("unable to update checked out version", err, string(out)) - } - - if len(strings.TrimSpace(version)) > 0 { - out, err = runFromRepoDir(r, "hg", "update", version) - } else { - out, err = runFromRepoDir(r, "hg", "update") + return newVcsRemoteErrorOr("unable to fetch latest changes", err, string(out)) } + return nil +} +func (r *hgRepo) updateVersion(ctx context.Context, version string) error { + out, err := runFromRepoDir(ctx, r, "hg", "update", version) if err != nil { - return vcs.NewRemoteError("unable to update checked out version", err, string(out)) + return newVcsRemoteErrorOr("unable to update checked out version", err, string(out)) } return nil @@ 
-207,7 +164,7 @@ type svnRepo struct { *vcs.SvnRepo } -func (r *svnRepo) Get() error { +func (r *svnRepo) get(ctx context.Context) error { remote := r.Remote() if strings.HasPrefix(remote, "/") { remote = "file://" + remote @@ -215,33 +172,34 @@ func (r *svnRepo) Get() error { remote = "file:///" + remote } - out, err := runFromCwd("svn", "checkout", remote, r.LocalPath()) + out, err := runFromCwd(ctx, "svn", "checkout", remote, r.LocalPath()) if err != nil { - return vcs.NewRemoteError("unable to get repository", err, string(out)) + return newVcsRemoteErrorOr("unable to get repository", err, string(out)) } return nil } -func (r *svnRepo) Update() error { - out, err := runFromRepoDir(r, "svn", "update") +func (r *svnRepo) update(ctx context.Context) error { + out, err := runFromRepoDir(ctx, r, "svn", "update") if err != nil { - return vcs.NewRemoteError("unable to update repository", err, string(out)) + return newVcsRemoteErrorOr("unable to update repository", err, string(out)) } return err } -func (r *svnRepo) UpdateVersion(version string) error { - out, err := runFromRepoDir(r, "svn", "update", "-r", version) +func (r *svnRepo) updateVersion(ctx context.Context, version string) error { + out, err := runFromRepoDir(ctx, r, "svn", "update", "-r", version) if err != nil { - return vcs.NewRemoteError("unable to update checked out version", err, string(out)) + return newVcsRemoteErrorOr("unable to update checked out version", err, string(out)) } return nil } func (r *svnRepo) CommitInfo(id string) (*vcs.CommitInfo, error) { + ctx := context.TODO() // There are cases where Svn log doesn't return anything for HEAD or BASE. // svn info does provide details for these but does not have elements like // the commit message. 
@@ -254,15 +212,15 @@ func (r *svnRepo) CommitInfo(id string) (*vcs.CommitInfo, error) { Commit commit `xml:"entry>commit"` } - out, err := runFromRepoDir(r, "svn", "info", "-r", id, "--xml") + out, err := runFromRepoDir(ctx, r, "svn", "info", "-r", id, "--xml") if err != nil { - return nil, vcs.NewLocalError("unable to retrieve commit information", err, string(out)) + return nil, newVcsLocalErrorOr("unable to retrieve commit information", err, string(out)) } infos := new(info) err = xml.Unmarshal(out, &infos) if err != nil { - return nil, vcs.NewLocalError("unable to retrieve commit information", err, string(out)) + return nil, newVcsLocalErrorOr("unable to retrieve commit information", err, string(out)) } id = infos.Commit.Revision @@ -271,9 +229,9 @@ func (r *svnRepo) CommitInfo(id string) (*vcs.CommitInfo, error) { } } - out, err := runFromRepoDir(r, "svn", "log", "-r", id, "--xml") + out, err := runFromRepoDir(ctx, r, "svn", "log", "-r", id, "--xml") if err != nil { - return nil, vcs.NewRemoteError("unable to retrieve commit information", err, string(out)) + return nil, newVcsRemoteErrorOr("unable to retrieve commit information", err, string(out)) } type logentry struct { @@ -290,7 +248,7 @@ func (r *svnRepo) CommitInfo(id string) (*vcs.CommitInfo, error) { logs := new(log) err = xml.Unmarshal(out, &logs) if err != nil { - return nil, vcs.NewLocalError("unable to retrieve commit information", err, string(out)) + return nil, newVcsLocalErrorOr("unable to retrieve commit information", err, string(out)) } if len(logs.Logs) == 0 { @@ -306,7 +264,7 @@ func (r *svnRepo) CommitInfo(id string) (*vcs.CommitInfo, error) { if len(logs.Logs[0].Date) > 0 { ci.Date, err = time.Parse(time.RFC3339Nano, logs.Logs[0].Date) if err != nil { - return nil, vcs.NewLocalError("unable to retrieve commit information", err, string(out)) + return nil, newVcsLocalErrorOr("unable to retrieve commit information", err, string(out)) } } diff --git 
a/vendor/github.com/sdboyer/gps/vcs_repo_test.go b/vendor/github.com/sdboyer/gps/vcs_repo_test.go index 722edb3483..f832798c09 100644 --- a/vendor/github.com/sdboyer/gps/vcs_repo_test.go +++ b/vendor/github.com/sdboyer/gps/vcs_repo_test.go @@ -1,6 +1,8 @@ package gps import ( + "context" + "errors" "io/ioutil" "os" "testing" @@ -12,14 +14,45 @@ import ( // original implementation of these test files come from // https://github.com/Masterminds/vcs test files -func TestSvnRepo(t *testing.T) { +func TestErrs(t *testing.T) { + err := newVcsLocalErrorOr("", context.Canceled, "") + if err != context.Canceled { + t.Errorf("context errors should always pass through, got %s", err) + } + err = newVcsRemoteErrorOr("", context.Canceled, "") + if err != context.Canceled { + t.Errorf("context errors should always pass through, got %s", err) + } + err = newVcsLocalErrorOr("", context.DeadlineExceeded, "") + if err != context.DeadlineExceeded { + t.Errorf("context errors should always pass through, got %s", err) + } + err = newVcsRemoteErrorOr("", context.DeadlineExceeded, "") + if err != context.DeadlineExceeded { + t.Errorf("context errors should always pass through, got %s", err) + } + + err = newVcsLocalErrorOr("foo", errors.New("bar"), "baz") + if _, is := err.(*vcs.LocalError); !is { + t.Errorf("should have gotten local error, got %T %v", err, err) + } + err = newVcsRemoteErrorOr("foo", errors.New("bar"), "baz") + if _, is := err.(*vcs.RemoteError); !is { + t.Errorf("should have gotten remote error, got %T %v", err, err) + } +} + +func testSvnRepo(t *testing.T) { + t.Parallel() + if testing.Short() { t.Skip("Skipping slow test in short mode") } + ctx := context.Background() tempDir, err := ioutil.TempDir("", "go-vcs-svn-tests") if err != nil { - t.Error(err) + t.Fatal(err) } defer func() { err = os.RemoveAll(tempDir) @@ -30,54 +63,54 @@ func TestSvnRepo(t *testing.T) { rep, err := 
vcs.NewSvnRepo("https://github.com/Masterminds/VCSTestRepo/trunk", tempDir+string(os.PathSeparator)+"VCSTestRepo") if err != nil { - t.Error(err) + t.Fatal(err) } repo := &svnRepo{rep} // Do an initial checkout. - err = repo.Get() + err = repo.get(ctx) if err != nil { - t.Errorf("Unable to checkout SVN repo. Err was %s", err) + t.Fatalf("Unable to checkout SVN repo. Err was %#v", err) } // Verify SVN repo is a SVN repo if !repo.CheckLocal() { - t.Error("Problem checking out repo or SVN CheckLocal is not working") + t.Fatal("Problem checking out repo or SVN CheckLocal is not working") } // Update the version to a previous version. - err = repo.UpdateVersion("r2") + err = repo.updateVersion(ctx, "r2") if err != nil { - t.Errorf("Unable to update SVN repo version. Err was %s", err) + t.Fatalf("Unable to update SVN repo version. Err was %s", err) } // Use Version to verify we are on the right version. v, err := repo.Version() - if v != "2" { - t.Error("Error checking checked SVN out version") - } if err != nil { - t.Error(err) + t.Fatal(err) + } + if v != "2" { + t.Fatal("Error checking checked SVN out version") } // Perform an update which should take up back to the latest version. - err = repo.Update() + err = repo.update(ctx) if err != nil { - t.Error(err) + t.Fatal(err) } // Make sure we are on a newer version because of the update. v, err = repo.Version() - if v == "2" { - t.Error("Error with version. Still on old version. Update failed") - } if err != nil { - t.Error(err) + t.Fatal(err) + } + if v == "2" { + t.Fatal("Error with version. Still on old version. 
Update failed") } ci, err := repo.CommitInfo("2") if err != nil { - t.Error(err) + t.Fatal(err) } if ci.Commit != "2" { t.Error("Svn.CommitInfo wrong commit id") @@ -90,7 +123,7 @@ func TestSvnRepo(t *testing.T) { } ti, err := time.Parse(time.RFC3339Nano, "2015-07-29T13:46:20.000000Z") if err != nil { - t.Error(err) + t.Fatal(err) } if !ti.Equal(ci.Date) { t.Error("Svn.CommitInfo wrong date") @@ -102,14 +135,17 @@ func TestSvnRepo(t *testing.T) { } } -func TestHgRepo(t *testing.T) { +func testHgRepo(t *testing.T) { + t.Parallel() + if testing.Short() { t.Skip("Skipping slow test in short mode") } + ctx := context.Background() tempDir, err := ioutil.TempDir("", "go-vcs-hg-tests") if err != nil { - t.Error(err) + t.Fatal(err) } defer func() { @@ -121,60 +157,55 @@ func TestHgRepo(t *testing.T) { rep, err := vcs.NewHgRepo("https://bitbucket.org/mattfarina/testhgrepo", tempDir+"/testhgrepo") if err != nil { - t.Error(err) + t.Fatal(err) } repo := &hgRepo{rep} // Do an initial clone. - err = repo.Get() + err = repo.get(ctx) if err != nil { - t.Errorf("Unable to clone Hg repo. Err was %s", err) + t.Fatalf("Unable to clone Hg repo. Err was %s", err) } // Verify Hg repo is a Hg repo if !repo.CheckLocal() { - t.Error("Problem checking out repo or Hg CheckLocal is not working") + t.Fatal("Problem checking out repo or Hg CheckLocal is not working") } // Set the version using the short hash. - err = repo.UpdateVersion("a5494ba2177f") + err = repo.updateVersion(ctx, "a5494ba2177f") if err != nil { - t.Errorf("Unable to update Hg repo version. Err was %s", err) + t.Fatalf("Unable to update Hg repo version. Err was %s", err) } // Use Version to verify we are on the right version. v, err := repo.Version() - if v != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" { - t.Errorf("Error checking checked out Hg version: %s", v) - } if err != nil { - t.Error(err) + t.Fatal(err) } - - // Perform an update. 
- err = repo.Update() - if err != nil { - t.Error(err) + if v != "a5494ba2177ff9ef26feb3c155dfecc350b1a8ef" { + t.Fatalf("Error checking checked out Hg version: %s", v) } - v, err = repo.Version() - if v != "9c6ccbca73e8a1351c834f33f57f1f7a0329ad35" { - t.Errorf("Error checking checked out Hg version: %s", v) - } + // Perform an update. + err = repo.fetch(ctx) if err != nil { - t.Error(err) + t.Fatal(err) } } -func TestGitRepo(t *testing.T) { +func testGitRepo(t *testing.T) { + t.Parallel() + if testing.Short() { t.Skip("Skipping slow test in short mode") } + ctx := context.Background() tempDir, err := ioutil.TempDir("", "go-vcs-git-tests") if err != nil { - t.Error(err) + t.Fatal(err) } defer func() { @@ -186,69 +217,72 @@ func TestGitRepo(t *testing.T) { rep, err := vcs.NewGitRepo("https://github.com/Masterminds/VCSTestRepo", tempDir+"/VCSTestRepo") if err != nil { - t.Error(err) + t.Fatal(err) } repo := &gitRepo{rep} // Do an initial clone. - err = repo.Get() + err = repo.get(ctx) if err != nil { - t.Errorf("Unable to clone Git repo. Err was %s", err) + t.Fatalf("Unable to clone Git repo. Err was %s", err) } // Verify Git repo is a Git repo if !repo.CheckLocal() { - t.Error("Problem checking out repo or Git CheckLocal is not working") + t.Fatal("Problem checking out repo or Git CheckLocal is not working") } // Perform an update. - err = repo.Update() + err = repo.fetch(ctx) if err != nil { - t.Error(err) + t.Fatal(err) } v, err := repo.Current() if err != nil { - t.Errorf("Error trying Git Current: %s", err) + t.Fatalf("Error trying Git Current: %s", err) } if v != "master" { - t.Errorf("Current failed to detect Git on tip of master. Got version: %s", v) + t.Fatalf("Current failed to detect Git on tip of master. Got version: %s", v) } // Set the version using the short hash. - err = repo.UpdateVersion("806b07b") + err = repo.updateVersion(ctx, "806b07b") if err != nil { - t.Errorf("Unable to update Git repo version. 
Err was %s", err) + t.Fatalf("Unable to update Git repo version. Err was %s", err) } // Once a ref has been checked out the repo is in a detached head state. // Trying to pull in an update in this state will cause an error. Update // should cleanly handle this. Pulling on a branch (tested elsewhere) and // skipping that here. - err = repo.Update() + err = repo.fetch(ctx) if err != nil { - t.Error(err) + t.Fatal(err) } // Use Version to verify we are on the right version. v, err = repo.Version() - if v != "806b07b08faa21cfbdae93027904f80174679402" { - t.Error("Error checking checked out Git version") - } if err != nil { - t.Error(err) + t.Fatal(err) + } + if v != "806b07b08faa21cfbdae93027904f80174679402" { + t.Fatal("Error checking checked out Git version") } } -func TestBzrRepo(t *testing.T) { +func testBzrRepo(t *testing.T) { + t.Parallel() + if testing.Short() { t.Skip("Skipping slow test in short mode") } + ctx := context.Background() tempDir, err := ioutil.TempDir("", "go-vcs-bzr-tests") if err != nil { - t.Error(err) + t.Fatal(err) } defer func() { @@ -266,43 +300,43 @@ func TestBzrRepo(t *testing.T) { repo := &bzrRepo{rep} // Do an initial clone. - err = repo.Get() + err = repo.get(ctx) if err != nil { - t.Errorf("Unable to clone Bzr repo. Err was %s", err) + t.Fatalf("Unable to clone Bzr repo. Err was %s", err) } // Verify Bzr repo is a Bzr repo if !repo.CheckLocal() { - t.Error("Problem checking out repo or Bzr CheckLocal is not working") + t.Fatal("Problem checking out repo or Bzr CheckLocal is not working") } v, err := repo.Current() if err != nil { - t.Errorf("Error trying Bzr Current: %s", err) + t.Fatalf("Error trying Bzr Current: %s", err) } if v != "-1" { - t.Errorf("Current failed to detect Bzr on tip of branch. Got version: %s", v) + t.Fatalf("Current failed to detect Bzr on tip of branch. Got version: %s", v) } - err = repo.UpdateVersion("2") + err = repo.updateVersion(ctx, "2") if err != nil { - t.Errorf("Unable to update Bzr repo version. 
Err was %s", err) + t.Fatalf("Unable to update Bzr repo version. Err was %s", err) } // Use Version to verify we are on the right version. v, err = repo.Version() - if v != "2" { - t.Error("Error checking checked out Bzr version") - } if err != nil { - t.Error(err) + t.Fatal(err) + } + if v != "2" { + t.Fatal("Error checking checked out Bzr version") } v, err = repo.Current() if err != nil { - t.Errorf("Error trying Bzr Current: %s", err) + t.Fatalf("Error trying Bzr Current: %s", err) } if v != "2" { - t.Errorf("Current failed to detect Bzr on rev 2 of branch. Got version: %s", v) + t.Fatalf("Current failed to detect Bzr on rev 2 of branch. Got version: %s", v) } } diff --git a/vendor/github.com/sdboyer/gps/vcs_source.go b/vendor/github.com/sdboyer/gps/vcs_source.go index 940dd82ec8..781a5cc2d5 100644 --- a/vendor/github.com/sdboyer/gps/vcs_source.go +++ b/vendor/github.com/sdboyer/gps/vcs_source.go @@ -2,191 +2,177 @@ package gps import ( "bytes" + "context" "fmt" "os" "os/exec" "path/filepath" "strings" - "sync" + "time" "github.com/Masterminds/semver" - "github.com/Masterminds/vcs" "github.com/sdboyer/gps/internal/fs" + "github.com/sdboyer/gps/pkgtree" ) -// Kept here as a reference in case it does become important to implement a -// vcsSource interface. Remove if/when it becomes clear we're never going to do -// this. -//type vcsSource interface { -//syncLocal() error -//ensureLocal() error -//listLocalVersionPairs() ([]PairedVersion, sourceExistence, error) -//listUpstreamVersionPairs() ([]PairedVersion, sourceExistence, error) -//hasRevision(Revision) (bool, error) -//checkout(Version) error -//exportVersionTo(Version, string) error -//} +type baseVCSSource struct { + repo ctxRepo +} -// gitSource is a generic git repository implementation that should work with -// all standard git remotes. 
-type gitSource struct { - baseVCSSource +func (bs *baseVCSSource) sourceType() string { + return string(bs.repo.Vcs()) } -func (s *gitSource) exportVersionTo(v Version, to string) error { - // Get away without syncing local, if we can - r := s.crepo.r - // ...but local repo does have to at least exist - if err := s.ensureCacheExistence(); err != nil { - return err +func (bs *baseVCSSource) existsLocally(ctx context.Context) bool { + return bs.repo.CheckLocal() +} + +// TODO reimpl for git +func (bs *baseVCSSource) existsUpstream(ctx context.Context) bool { + return !bs.repo.Ping() +} + +func (bs *baseVCSSource) upstreamURL() string { + return bs.repo.Remote() +} + +func (bs *baseVCSSource) getManifestAndLock(ctx context.Context, pr ProjectRoot, r Revision, an ProjectAnalyzer) (Manifest, Lock, error) { + err := bs.repo.updateVersion(ctx, r.String()) + if err != nil { + return nil, nil, unwrapVcsErr(err) } - if err := os.MkdirAll(to, 0777); err != nil { - return err + m, l, err := an.DeriveManifestAndLock(bs.repo.LocalPath(), pr) + if err != nil { + return nil, nil, err } - do := func() error { - s.crepo.mut.Lock() - defer s.crepo.mut.Unlock() + if l != nil && l != Lock(nil) { + l = prepLock(l) + } - // Back up original index - idx, bak := filepath.Join(r.LocalPath(), ".git", "index"), filepath.Join(r.LocalPath(), ".git", "origindex") - err := fs.RenameWithFallback(idx, bak) - if err != nil { - return err - } + return prepManifest(m), l, nil +} + +func (bs *baseVCSSource) revisionPresentIn(r Revision) (bool, error) { + return bs.repo.IsReference(string(r)), nil +} - // could have an err here...but it's hard to imagine how? - defer fs.RenameWithFallback(bak, idx) +// initLocal clones/checks out the upstream repository to disk for the first +// time. 
+func (bs *baseVCSSource) initLocal(ctx context.Context) error { + err := bs.repo.get(ctx) - vstr := v.String() - if rv, ok := v.(PairedVersion); ok { - vstr = rv.Underlying().String() - } + if err != nil { + return unwrapVcsErr(err) + } + return nil +} - out, err := runFromRepoDir(r, "git", "read-tree", vstr) - if err != nil { - return fmt.Errorf("%s: %s", out, err) - } +// updateLocal ensures the local data (versions and code) we have about the +// source is fully up to date with that of the canonical upstream source. +func (bs *baseVCSSource) updateLocal(ctx context.Context) error { + err := bs.repo.fetch(ctx) - // Ensure we have exactly one trailing slash - to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator) - // Checkout from our temporary index to the desired target location on - // disk; now it's git's job to make it fast. - // - // Sadly, this approach *does* also write out vendor dirs. There doesn't - // appear to be a way to make checkout-index respect sparse checkout - // rules (-a supercedes it). The alternative is using plain checkout, - // though we have a bunch of housekeeping to do to set up, then tear - // down, the sparse checkout controls, as well as restore the original - // index and HEAD. - out, err = runFromRepoDir(r, "git", "checkout-index", "-a", "--prefix="+to) - if err != nil { - return fmt.Errorf("%s: %s", out, err) - } - return nil + if err != nil { + return unwrapVcsErr(err) } + return nil +} - err := do() - if err != nil && !s.crepo.synced { - // If there was an err, and the repo cache is stale, it might've been - // beacuse we were missing the rev/ref. Try syncing, then run the export - // op again. 
- err = s.syncLocal() - if err != nil { - return err - } - err = do() +func (bs *baseVCSSource) listPackages(ctx context.Context, pr ProjectRoot, r Revision) (ptree pkgtree.PackageTree, err error) { + err = bs.repo.updateVersion(ctx, r.String()) + + if err != nil { + err = unwrapVcsErr(err) + } else { + ptree, err = pkgtree.ListPackages(bs.repo.LocalPath(), string(pr)) } - return err + return } -func (s *gitSource) listVersions() (vlist []Version, err error) { - s.baseVCSSource.lvmut.Lock() - defer s.baseVCSSource.lvmut.Unlock() +func (bs *baseVCSSource) exportRevisionTo(ctx context.Context, r Revision, to string) error { + // Only make the parent dir, as CopyDir will balk on trying to write to an + // empty but existing dir. + if err := os.MkdirAll(filepath.Dir(to), 0777); err != nil { + return err + } - if s.cvsync { - vlist = make([]Version, len(s.dc.vMap)) - k := 0 - for v, r := range s.dc.vMap { - vlist[k] = v.Is(r) - k++ - } + if err := bs.repo.updateVersion(ctx, r.String()); err != nil { + return unwrapVcsErr(err) + } - return + // TODO(sdboyer) this is a simplistic approach and relying on the tools + // themselves might make it faster, but git's the overwhelming case (and has + // its own method) so fine for now + return fs.CopyDir(bs.repo.LocalPath(), to) +} + +// gitSource is a generic git repository implementation that should work with +// all standard git remotes. +type gitSource struct { + baseVCSSource +} + +func (s *gitSource) exportRevisionTo(ctx context.Context, rev Revision, to string) error { + r := s.repo + + if err := os.MkdirAll(to, 0777); err != nil { + return err } - vlist, err = s.doListVersions() + // Back up original index + idx, bak := filepath.Join(r.LocalPath(), ".git", "index"), filepath.Join(r.LocalPath(), ".git", "origindex") + err := fs.RenameWithFallback(idx, bak) if err != nil { - return nil, err + return err + } + + // could have an err here...but it's hard to imagine how? 
+ defer fs.RenameWithFallback(bak, idx) + + out, err := runFromRepoDir(ctx, r, "git", "read-tree", rev.String()) + if err != nil { + return fmt.Errorf("%s: %s", out, err) } - // Process the version data into the cache + // Ensure we have exactly one trailing slash + to = strings.TrimSuffix(to, string(os.PathSeparator)) + string(os.PathSeparator) + // Checkout from our temporary index to the desired target location on + // disk; now it's git's job to make it fast. // - // reset the rmap and vmap, as they'll be fully repopulated by this - s.dc.vMap = make(map[UnpairedVersion]Revision) - s.dc.rMap = make(map[Revision][]UnpairedVersion) - - for _, v := range vlist { - pv := v.(PairedVersion) - u, r := pv.Unpair(), pv.Underlying() - s.dc.vMap[u] = r - s.dc.rMap[r] = append(s.dc.rMap[r], u) + // Sadly, this approach *does* also write out vendor dirs. There doesn't + // appear to be a way to make checkout-index respect sparse checkout + // rules (-a supercedes it). The alternative is using plain checkout, + // though we have a bunch of housekeeping to do to set up, then tear + // down, the sparse checkout controls, as well as restore the original + // index and HEAD. 
+ out, err = runFromRepoDir(ctx, r, "git", "checkout-index", "-a", "--prefix="+to) + if err != nil { + return fmt.Errorf("%s: %s", out, err) } - // Mark the cache as being in sync with upstream's version list - s.cvsync = true - return + + return nil } -func (s *gitSource) doListVersions() (vlist []Version, err error) { - r := s.crepo.r +func (s *gitSource) listVersions(ctx context.Context) (vlist []PairedVersion, err error) { + r := s.repo + var out []byte - c := exec.Command("git", "ls-remote", r.Remote()) + c := newMonitoredCmd(exec.Command("git", "ls-remote", r.Remote()), 30*time.Second) // Ensure no prompting for PWs - c.Env = mergeEnvLists([]string{"GIT_ASKPASS=", "GIT_TERMINAL_PROMPT=0"}, os.Environ()) - out, err = c.CombinedOutput() - - all := bytes.Split(bytes.TrimSpace(out), []byte("\n")) - if err != nil || len(all) == 0 { - // TODO(sdboyer) remove this path? it really just complicates things, for - // probably not much benefit - - // ls-remote failed, probably due to bad communication or a faulty - // upstream implementation. 
So fetch updates, then build the list - // locally - s.crepo.mut.Lock() - err = r.Update() - s.crepo.mut.Unlock() - if err != nil { - // Definitely have a problem, now - bail out - return - } - - // Upstream and cache must exist for this to have worked, so add that to - // searched and found - s.ex.s |= existsUpstream | existsInCache - s.ex.f |= existsUpstream | existsInCache - // Also, local is definitely now synced - s.crepo.synced = true - - s.crepo.mut.RLock() - out, err = runFromRepoDir(r, "git", "show-ref", "--dereference") - s.crepo.mut.RUnlock() - if err != nil { - // TODO(sdboyer) More-er proper-er error - return - } + c.cmd.Env = mergeEnvLists([]string{"GIT_ASKPASS=", "GIT_TERMINAL_PROMPT=0"}, os.Environ()) + out, err = c.combinedOutput(ctx) - all = bytes.Split(bytes.TrimSpace(out), []byte("\n")) - if len(all) == 0 { - return nil, fmt.Errorf("no versions available for %s (this is weird)", r.Remote()) - } + if err != nil { + return nil, err } - // Local cache may not actually exist here, but upstream definitely does - s.ex.s |= existsUpstream - s.ex.f |= existsUpstream + all := bytes.Split(bytes.TrimSpace(out), []byte("\n")) + if len(all) == 1 && len(all[0]) == 0 { + return nil, fmt.Errorf("no data returned from ls-remote") + } // Pull out the HEAD rev (it's always first) so we know what branches to // mark as default. 
This is, perhaps, not the best way to glean this, but it @@ -219,7 +205,7 @@ func (s *gitSource) doListVersions() (vlist []Version, err error) { smap := make(map[string]bool) uniq := 0 - vlist = make([]Version, len(all)-1) // less 1, because always ignore HEAD + vlist = make([]PairedVersion, len(all)-1) // less 1, because always ignore HEAD for _, pair := range all { var v PairedVersion if string(pair[46:51]) == "heads" { @@ -291,28 +277,14 @@ type gopkginSource struct { major uint64 } -func (s *gopkginSource) listVersions() (vlist []Version, err error) { - s.baseVCSSource.lvmut.Lock() - defer s.baseVCSSource.lvmut.Unlock() - - if s.cvsync { - vlist = make([]Version, len(s.dc.vMap)) - k := 0 - for v, r := range s.dc.vMap { - vlist[k] = v.Is(r) - k++ - } - - return - } - - ovlist, err := s.doListVersions() +func (s *gopkginSource) listVersions(ctx context.Context) ([]PairedVersion, error) { + ovlist, err := s.gitSource.listVersions(ctx) if err != nil { return nil, err } // Apply gopkg.in's filtering rules - vlist = make([]Version, len(ovlist)) + vlist := make([]PairedVersion, len(ovlist)) k := 0 var dbranch int // index of branch to be marked default var bsv *semver.Version @@ -363,21 +335,7 @@ func (s *gopkginSource) listVersions() (vlist []Version, err error) { }.Is(dbv.r) } - // Process the filtered version data into the cache - // - // reset the rmap and vmap, as they'll be fully repopulated by this - s.dc.vMap = make(map[UnpairedVersion]Revision) - s.dc.rMap = make(map[Revision][]UnpairedVersion) - - for _, v := range vlist { - pv := v.(PairedVersion) - u, r := pv.Unpair(), pv.Underlying() - s.dc.vMap[u] = r - s.dc.rMap[r] = append(s.dc.rMap[r], u) - } - // Mark the cache as being in sync with upstream's version list - s.cvsync = true - return + return vlist, nil } // bzrSource is a generic bzr repository implementation that should work with @@ -386,60 +344,11 @@ type bzrSource struct { baseVCSSource } -func (s *bzrSource) update() error { - r := s.crepo.r - - 
out, err := runFromRepoDir(r, "bzr", "pull") - if err != nil { - return vcs.NewRemoteError("Unable to update repository", err, string(out)) - } - - out, err = runFromRepoDir(r, "bzr", "update") - if err != nil { - return vcs.NewRemoteError("Unable to update repository", err, string(out)) - } - - return nil -} - -func (s *bzrSource) listVersions() (vlist []Version, err error) { - s.baseVCSSource.lvmut.Lock() - defer s.baseVCSSource.lvmut.Unlock() - - if s.cvsync { - vlist = make([]Version, len(s.dc.vMap)) - k := 0 - for v, r := range s.dc.vMap { - vlist[k] = v.Is(r) - k++ - } - - return - } +func (s *bzrSource) listVersions(ctx context.Context) ([]PairedVersion, error) { + r := s.repo - // Must first ensure cache checkout's existence - err = s.ensureCacheExistence() - if err != nil { - return - } - r := s.crepo.r - - // Local repo won't have all the latest refs if ensureCacheExistence() - // didn't create it - if !s.crepo.synced { - s.crepo.mut.Lock() - err = s.update() - s.crepo.mut.Unlock() - if err != nil { - return - } - - s.crepo.synced = true - } - - var out []byte // Now, list all the tags - out, err = runFromRepoDir(r, "bzr", "tags", "--show-ids", "-v") + out, err := runFromRepoDir(ctx, r, "bzr", "tags", "--show-ids", "-v") if err != nil { return nil, fmt.Errorf("%s: %s", err, string(out)) } @@ -447,41 +356,28 @@ func (s *bzrSource) listVersions() (vlist []Version, err error) { all := bytes.Split(bytes.TrimSpace(out), []byte("\n")) var branchrev []byte - branchrev, err = runFromRepoDir(r, "bzr", "version-info", "--custom", "--template={revision_id}", "--revision=branch:.") + branchrev, err = runFromRepoDir(ctx, r, "bzr", "version-info", "--custom", "--template={revision_id}", "--revision=branch:.") br := string(branchrev) if err != nil { return nil, fmt.Errorf("%s: %s", err, br) } - // Both commands completed successfully, so there's no further possibility - // of errors. 
That means it's now safe to reset the rmap and vmap, as - // they're about to be fully repopulated. - s.dc.vMap = make(map[UnpairedVersion]Revision) - s.dc.rMap = make(map[Revision][]UnpairedVersion) - vlist = make([]Version, len(all)+1) + vlist := make([]PairedVersion, 0, len(all)+1) // Now, all the tags. - for k, line := range all { + for _, line := range all { idx := bytes.IndexByte(line, 32) // space v := NewVersion(string(line[:idx])) r := Revision(bytes.TrimSpace(line[idx:])) - - s.dc.vMap[v] = r - s.dc.rMap[r] = append(s.dc.rMap[r], v) - vlist[k] = v.Is(r) + vlist = append(vlist, v.Is(r)) } // Last, add the default branch, hardcoding the visual representation of it // that bzr uses when operating in the workflow mode we're using. v := newDefaultBranch("(default)") - rev := Revision(string(branchrev)) - s.dc.vMap[v] = rev - s.dc.rMap[rev] = append(s.dc.rMap[rev], v) - vlist[len(vlist)-1] = v.Is(rev) + vlist = append(vlist, v.Is(Revision(string(branchrev)))) - // Cache is now in sync with upstream's version list - s.cvsync = true - return + return vlist, nil } // hgSource is a generic hg repository implementation that should work with @@ -490,61 +386,12 @@ type hgSource struct { baseVCSSource } -func (s *hgSource) update() error { - r := s.crepo.r - - out, err := runFromRepoDir(r, "hg", "pull") - if err != nil { - return vcs.NewLocalError("Unable to update checked out version", err, string(out)) - } - - out, err = runFromRepoDir(r, "hg", "update") - if err != nil { - return vcs.NewLocalError("Unable to update checked out version", err, string(out)) - } - - return nil -} - -func (s *hgSource) listVersions() (vlist []Version, err error) { - s.baseVCSSource.lvmut.Lock() - defer s.baseVCSSource.lvmut.Unlock() - - if s.cvsync { - vlist = make([]Version, len(s.dc.vMap)) - k := 0 - for v, r := range s.dc.vMap { - vlist[k] = v.Is(r) - k++ - } - - return - } - - // Must first ensure cache checkout's existence - err = s.ensureCacheExistence() - if err != nil { - return 
- } - r := s.crepo.r - - // Local repo won't have all the latest refs if ensureCacheExistence() - // didn't create it - if !s.crepo.synced { - s.crepo.mut.Lock() - err = unwrapVcsErr(s.update()) - s.crepo.mut.Unlock() - if err != nil { - return - } - - s.crepo.synced = true - } - - var out []byte +func (s *hgSource) listVersions(ctx context.Context) ([]PairedVersion, error) { + var vlist []PairedVersion + r := s.repo // Now, list all the tags - out, err = runFromRepoDir(r, "hg", "tags", "--debug", "--verbose") + out, err := runFromRepoDir(ctx, r, "hg", "tags", "--debug", "--verbose") if err != nil { return nil, fmt.Errorf("%s: %s", err, string(out)) } @@ -578,7 +425,7 @@ func (s *hgSource) listVersions() (vlist []Version, err error) { // bookmarks next, because the presence of the magic @ bookmark has to // determine how we handle the branches var magicAt bool - out, err = runFromRepoDir(r, "hg", "bookmarks", "--debug") + out, err = runFromRepoDir(ctx, r, "hg", "bookmarks", "--debug") if err != nil { // better nothing than partial and misleading return nil, fmt.Errorf("%s: %s", err, string(out)) @@ -600,7 +447,7 @@ func (s *hgSource) listVersions() (vlist []Version, err error) { idx := bytes.IndexByte(pair[0], 32) // space // if it's the magic @ marker, make that the default branch str := string(pair[0][:idx]) - var v Version + var v PairedVersion if str == "@" { magicAt = true v = newDefaultBranch(str).Is(Revision(pair[1])).(PairedVersion) @@ -611,7 +458,7 @@ func (s *hgSource) listVersions() (vlist []Version, err error) { } } - out, err = runFromRepoDir(r, "hg", "branches", "-c", "--debug") + out, err = runFromRepoDir(ctx, r, "hg", "branches", "-c", "--debug") if err != nil { // better nothing than partial and misleading return nil, fmt.Errorf("%s: %s", err, string(out)) @@ -630,7 +477,7 @@ func (s *hgSource) listVersions() (vlist []Version, err error) { str := string(pair[0][:idx]) // if there was no magic @ bookmark, and this is mercurial's magic // "default" 
branch, then mark it as default branch - var v Version + var v PairedVersion if !magicAt && str == "default" { v = newDefaultBranch(str).Is(Revision(pair[1])).(PairedVersion) } else { @@ -639,54 +486,12 @@ func (s *hgSource) listVersions() (vlist []Version, err error) { vlist = append(vlist, v) } - // reset the rmap and vmap, as they'll be fully repopulated by this - s.dc.vMap = make(map[UnpairedVersion]Revision) - s.dc.rMap = make(map[Revision][]UnpairedVersion) - - for _, v := range vlist { - pv := v.(PairedVersion) - u, r := pv.Unpair(), pv.Underlying() - s.dc.vMap[u] = r - s.dc.rMap[r] = append(s.dc.rMap[r], u) - } - - // Cache is now in sync with upstream's version list - s.cvsync = true - return + return vlist, nil } type repo struct { - // Path to the root of the default working copy (NOT the repo itself) - rpath string - - // Mutex controlling general access to the repo - mut sync.RWMutex - // Object for direct repo interaction - r vcs.Repo - - // Whether or not the cache repo is in sync (think dvcs) with upstream - synced bool -} - -func (r *repo) exportVersionTo(v Version, to string) error { - r.mut.Lock() - defer r.mut.Unlock() - - // TODO(sdboyer) sloppy - this update may not be necessary - if !r.synced { - err := r.r.Update() - if err != nil { - return fmt.Errorf("err on attempting to update repo: %s", unwrapVcsErr(err)) - } - } - - r.r.UpdateVersion(v.String()) - - // TODO(sdboyer) this is a simplistic approach and relying on the tools - // themselves might make it faster, but git's the overwhelming case (and has - // its own method) so fine for now - return fs.CopyDir(r.rpath, to) + r ctxRepo } // This func copied from Masterminds/vcs so we can exec our own commands @@ -704,15 +509,3 @@ NextVar: } return out } - -func stripVendor(path string, info os.FileInfo, err error) error { - if info.Name() == "vendor" { - if _, err := os.Lstat(path); err == nil { - if info.IsDir() { - return removeAll(path) - } - } - } - - return nil -} diff --git 
a/vendor/github.com/sdboyer/gps/vcs_source_test.go b/vendor/github.com/sdboyer/gps/vcs_source_test.go new file mode 100644 index 0000000000..0794c1bc03 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/vcs_source_test.go @@ -0,0 +1,516 @@ +package gps + +import ( + "context" + "io/ioutil" + "net/url" + "os/exec" + "reflect" + "sync" + "testing" +) + +// Parent test that executes all the slow vcs interaction tests in parallel. +func TestSlowVcs(t *testing.T) { + t.Run("write-deptree", testWriteDepTree) + t.Run("source-gateway", testSourceGateway) + t.Run("bzr-repo", testBzrRepo) + t.Run("bzr-source", testBzrSourceInteractions) + t.Run("svn-repo", testSvnRepo) + // TODO(sdboyer) svn-source + t.Run("hg-repo", testHgRepo) + t.Run("hg-source", testHgSourceInteractions) + t.Run("git-repo", testGitRepo) + t.Run("git-source", testGitSourceInteractions) + t.Run("gopkgin-source", testGopkginSourceInteractions) +} + +func testGitSourceInteractions(t *testing.T) { + t.Parallel() + + // This test is slowish, skip it on -short + if testing.Short() { + t.Skip("Skipping git source version fetching test in short mode") + } + requiresBins(t, "git") + + cpath, err := ioutil.TempDir("", "smcache") + if err != nil { + t.Errorf("Failed to create temp dir: %s", err) + } + defer func() { + if err := removeAll(cpath); err != nil { + t.Errorf("removeAll failed: %s", err) + } + }() + + n := "github.com/sdboyer/gpkt" + un := "https://" + n + u, err := url.Parse(un) + if err != nil { + t.Fatalf("Error parsing URL %s: %s", un, err) + } + mb := maybeGitSource{ + url: u, + } + + ctx := context.Background() + superv := newSupervisor(ctx) + isrc, state, err := mb.try(ctx, cpath, newMemoryCache(), superv) + if err != nil { + t.Fatalf("Unexpected error while setting up gitSource for test repo: %s", err) + } + + wantstate := sourceIsSetUp | sourceExistsUpstream | sourceHasLatestVersionList + if state != wantstate { + t.Errorf("Expected return state to be %v, got %v", wantstate, 
state) + } + + err = isrc.initLocal(ctx) + if err != nil { + t.Fatalf("Error on cloning git repo: %s", err) + } + + src, ok := isrc.(*gitSource) + if !ok { + t.Fatalf("Expected a gitSource, got a %T", isrc) + } + + if un != src.upstreamURL() { + t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) + } + + pvlist, err := src.listVersions(ctx) + if err != nil { + t.Fatalf("Unexpected error getting version pairs from git repo: %s", err) + } + + vlist := hidePair(pvlist) + // check that an expected rev is present + is, err := src.revisionPresentIn(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")) + if err != nil { + t.Errorf("Unexpected error while checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was not present") + } + + if len(vlist) != 7 { + t.Errorf("git test repo should've produced seven versions, got %v: vlist was %s", len(vlist), vlist) + } else { + SortForUpgrade(vlist) + evl := []Version{ + NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), + NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), + NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), + newDefaultBranch("master").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), + NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), + NewBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), + NewBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), + } + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } + + // recheck that rev is present, this time interacting with cache differently + is, err = src.revisionPresentIn(Revision("30605f6ac35fcb075ad0bfa9296f90a7d891523e")) + if err != nil { + t.Errorf("Unexpected error while re-checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was 
not present on re-check") + } +} + +func testGopkginSourceInteractions(t *testing.T) { + t.Parallel() + + // This test is slowish, skip it on -short + if testing.Short() { + t.Skip("Skipping gopkg.in source version fetching test in short mode") + } + requiresBins(t, "git") + + cpath, err := ioutil.TempDir("", "smcache") + if err != nil { + t.Errorf("Failed to create temp dir: %s", err) + } + defer func() { + if err := removeAll(cpath); err != nil { + t.Errorf("removeAll failed: %s", err) + } + }() + + tfunc := func(opath, n string, major uint64, evl []Version) { + un := "https://" + n + u, err := url.Parse(un) + if err != nil { + t.Errorf("URL was bad, lolwut? errtext: %s", err) + return + } + mb := maybeGopkginSource{ + opath: opath, + url: u, + major: major, + } + + ctx := context.Background() + superv := newSupervisor(ctx) + isrc, state, err := mb.try(ctx, cpath, newMemoryCache(), superv) + if err != nil { + t.Errorf("Unexpected error while setting up gopkginSource for test repo: %s", err) + return + } + + wantstate := sourceIsSetUp | sourceExistsUpstream | sourceHasLatestVersionList + if state != wantstate { + t.Errorf("Expected return state to be %v, got %v", wantstate, state) + } + + err = isrc.initLocal(ctx) + if err != nil { + t.Fatalf("Error on cloning git repo: %s", err) + } + + src, ok := isrc.(*gopkginSource) + if !ok { + t.Errorf("Expected a gopkginSource, got a %T", isrc) + return + } + + if un != src.upstreamURL() { + t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) + } + if src.major != major { + t.Errorf("Expected %v as major version filter on gopkginSource, got %v", major, src.major) + } + + // check that an expected rev is present + rev := evl[0].(PairedVersion).Underlying() + is, err := src.revisionPresentIn(rev) + if err != nil { + t.Errorf("Unexpected error while checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision %s that should exist was not present", rev) + } + + pvlist, err := 
src.listVersions(ctx) + if err != nil { + t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) + } + + vlist := hidePair(pvlist) + if len(vlist) != len(evl) { + t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist)) + } else { + SortForUpgrade(vlist) + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } + + // Run again, this time to ensure cache outputs correctly + pvlist, err = src.listVersions(ctx) + if err != nil { + t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) + } + + vlist = hidePair(pvlist) + if len(vlist) != len(evl) { + t.Errorf("gopkgin test repo should've produced %v versions, got %v", len(evl), len(vlist)) + } else { + SortForUpgrade(vlist) + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } + + // recheck that rev is present, this time interacting with cache differently + is, err = src.revisionPresentIn(rev) + if err != nil { + t.Errorf("Unexpected error while re-checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was not present on re-check") + } + } + + // simultaneously run for v1, v2, and v3 filters of the target repo + wg := &sync.WaitGroup{} + wg.Add(3) + go func() { + tfunc("gopkg.in/sdboyer/gpkt.v1", "github.com/sdboyer/gpkt", 1, []Version{ + NewVersion("v1.1.0").Is(Revision("b2cb48dda625f6640b34d9ffb664533359ac8b91")), + NewVersion("v1.0.0").Is(Revision("bf85021c0405edbc4f3648b0603818d641674f72")), + newDefaultBranch("v1.1").Is(Revision("f1fbc520489a98306eb28c235204e39fa8a89c84")), + NewBranch("v1").Is(Revision("e3777f683305eafca223aefe56b4e8ecf103f467")), + }) + wg.Done() + }() + + go func() { + tfunc("gopkg.in/sdboyer/gpkt.v2", "github.com/sdboyer/gpkt", 2, []Version{ + 
NewVersion("v2.0.0").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), + }) + wg.Done() + }() + + go func() { + tfunc("gopkg.in/sdboyer/gpkt.v3", "github.com/sdboyer/gpkt", 3, []Version{ + newDefaultBranch("v3").Is(Revision("4a54adf81c75375d26d376459c00d5ff9b703e5e")), + }) + wg.Done() + }() + + wg.Wait() +} + +func testBzrSourceInteractions(t *testing.T) { + t.Parallel() + + // This test is quite slow (ugh bzr), so skip it on -short + if testing.Short() { + t.Skip("Skipping bzr source version fetching test in short mode") + } + requiresBins(t, "bzr") + + cpath, err := ioutil.TempDir("", "smcache") + if err != nil { + t.Errorf("Failed to create temp dir: %s", err) + } + defer func() { + if err := removeAll(cpath); err != nil { + t.Errorf("removeAll failed: %s", err) + } + }() + + n := "launchpad.net/govcstestbzrrepo" + un := "https://" + n + u, err := url.Parse(un) + if err != nil { + t.Fatalf("Error parsing URL %s: %s", un, err) + } + mb := maybeBzrSource{ + url: u, + } + + ctx := context.Background() + superv := newSupervisor(ctx) + isrc, state, err := mb.try(ctx, cpath, newMemoryCache(), superv) + if err != nil { + t.Fatalf("Unexpected error while setting up bzrSource for test repo: %s", err) + } + + wantstate := sourceIsSetUp | sourceExistsUpstream + if state != wantstate { + t.Errorf("Expected return state to be %v, got %v", wantstate, state) + } + + err = isrc.initLocal(ctx) + if err != nil { + t.Fatalf("Error on cloning git repo: %s", err) + } + + src, ok := isrc.(*bzrSource) + if !ok { + t.Fatalf("Expected a bzrSource, got a %T", isrc) + } + + if state != wantstate { + t.Errorf("Expected return state to be %v, got %v", wantstate, state) + } + if un != src.upstreamURL() { + t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) + } + evl := []Version{ + NewVersion("1.0.0").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")), + 
newDefaultBranch("(default)").Is(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")), + } + + // check that an expected rev is present + is, err := src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) + if err != nil { + t.Errorf("Unexpected error while checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was not present") + } + + pvlist, err := src.listVersions(ctx) + if err != nil { + t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err) + } + + vlist := hidePair(pvlist) + if len(vlist) != 2 { + t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist)) + } else { + SortForUpgrade(vlist) + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } + + // Run again, this time to ensure cache outputs correctly + pvlist, err = src.listVersions(ctx) + if err != nil { + t.Errorf("Unexpected error getting version pairs from bzr repo: %s", err) + } + + vlist = hidePair(pvlist) + if len(vlist) != 2 { + t.Errorf("bzr test repo should've produced two versions, got %v", len(vlist)) + } else { + SortForUpgrade(vlist) + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("bzr version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } + + // recheck that rev is present, this time interacting with cache differently + is, err = src.revisionPresentIn(Revision("matt@mattfarina.com-20150731135137-pbphasfppmygpl68")) + if err != nil { + t.Errorf("Unexpected error while re-checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was not present on re-check") + } +} + +func testHgSourceInteractions(t *testing.T) { + t.Parallel() + + // This test is slow, so skip it on -short + if testing.Short() { + t.Skip("Skipping hg source version fetching test in short mode") + } + requiresBins(t, "hg") + + cpath, err := 
ioutil.TempDir("", "smcache") + if err != nil { + t.Errorf("Failed to create temp dir: %s", err) + } + defer func() { + if err := removeAll(cpath); err != nil { + t.Errorf("removeAll failed: %s", err) + } + }() + + tfunc := func(n string, evl []Version) { + un := "https://" + n + u, err := url.Parse(un) + if err != nil { + t.Errorf("URL was bad, lolwut? errtext: %s", err) + return + } + mb := maybeHgSource{ + url: u, + } + + ctx := context.Background() + superv := newSupervisor(ctx) + isrc, state, err := mb.try(ctx, cpath, newMemoryCache(), superv) + if err != nil { + t.Errorf("Unexpected error while setting up hgSource for test repo: %s", err) + return + } + + wantstate := sourceIsSetUp | sourceExistsUpstream + if state != wantstate { + t.Errorf("Expected return state to be %v, got %v", wantstate, state) + } + + err = isrc.initLocal(ctx) + if err != nil { + t.Fatalf("Error on cloning git repo: %s", err) + } + + src, ok := isrc.(*hgSource) + if !ok { + t.Errorf("Expected a hgSource, got a %T", isrc) + return + } + + if state != wantstate { + t.Errorf("Expected return state to be %v, got %v", wantstate, state) + } + if un != src.upstreamURL() { + t.Errorf("Expected %s as source URL, got %s", un, src.upstreamURL()) + } + + // check that an expected rev is present + is, err := src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9")) + if err != nil { + t.Errorf("Unexpected error while checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was not present") + } + + pvlist, err := src.listVersions(ctx) + if err != nil { + t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) + } + + vlist := hidePair(pvlist) + if len(vlist) != len(evl) { + t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist)) + } else { + SortForUpgrade(vlist) + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } 
+ + // Run again, this time to ensure cache outputs correctly + pvlist, err = src.listVersions(ctx) + if err != nil { + t.Errorf("Unexpected error getting version pairs from hg repo: %s", err) + } + + vlist = hidePair(pvlist) + if len(vlist) != len(evl) { + t.Errorf("hg test repo should've produced %v versions, got %v", len(evl), len(vlist)) + } else { + SortForUpgrade(vlist) + if !reflect.DeepEqual(vlist, evl) { + t.Errorf("Version list was not what we expected:\n\t(GOT): %s\n\t(WNT): %s", vlist, evl) + } + } + + // recheck that rev is present, this time interacting with cache differently + is, err = src.revisionPresentIn(Revision("103d1bddef2199c80aad7c42041223083d613ef9")) + if err != nil { + t.Errorf("Unexpected error while re-checking revision presence: %s", err) + } else if !is { + t.Errorf("Revision that should exist was not present on re-check") + } + } + + // simultaneously run for both the repo with and without the magic bookmark + donech := make(chan struct{}) + go func() { + tfunc("bitbucket.org/sdboyer/withbm", []Version{ + NewVersion("v1.0.0").Is(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), + newDefaultBranch("@").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), + NewBranch("another").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), + NewBranch("default").Is(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")), + NewBranch("newbranch").Is(Revision("5e2a01be9aee942098e44590ae545c7143da9675")), + }) + close(donech) + }() + + tfunc("bitbucket.org/sdboyer/nobm", []Version{ + NewVersion("v1.0.0").Is(Revision("aa110802a0c64195d0a6c375c9f66668827c90b4")), + newDefaultBranch("default").Is(Revision("3d466f437f6616da594bbab6446cc1cb4328d1bb")), + NewBranch("another").Is(Revision("b10d05d581e5401f383e48ccfeb84b48fde99d06")), + NewBranch("newbranch").Is(Revision("5e2a01be9aee942098e44590ae545c7143da9675")), + }) + + <-donech +} + +// Fail a test if the specified binaries aren't installed. 
+func requiresBins(t *testing.T, bins ...string) { + for _, b := range bins { + _, err := exec.LookPath(b) + if err != nil { + t.Fatalf("%s is not installed", b) + } + } +} diff --git a/vendor/github.com/sdboyer/gps/version.go b/vendor/github.com/sdboyer/gps/version.go index 00dab3122b..25308ba390 100644 --- a/vendor/github.com/sdboyer/gps/version.go +++ b/vendor/github.com/sdboyer/gps/version.go @@ -69,15 +69,10 @@ type UnpairedVersion interface { } // types are weird -func (branchVersion) _private() {} func (branchVersion) _pair(bool) {} -func (plainVersion) _private() {} func (plainVersion) _pair(bool) {} -func (semVersion) _private() {} func (semVersion) _pair(bool) {} -func (versionPair) _private() {} func (versionPair) _pair(int) {} -func (Revision) _private() {} // NewBranch creates a new Version to represent a floating version (in // general, a branch). @@ -120,6 +115,10 @@ func (r Revision) String() string { return string(r) } +func (r Revision) typedString() string { + return "r-" + string(r) +} + // Type indicates the type of version - for revisions, "revision". 
func (r Revision) Type() VersionType { return IsRevision @@ -192,6 +191,10 @@ func (v branchVersion) String() string { return string(v.name) } +func (v branchVersion) typedString() string { + return fmt.Sprintf("b-%s", v.String()) +} + func (v branchVersion) Type() VersionType { return IsBranch } @@ -265,6 +268,10 @@ func (v plainVersion) String() string { return string(v) } +func (v plainVersion) typedString() string { + return fmt.Sprintf("pv-%s", v.String()) +} + func (v plainVersion) Type() VersionType { return IsVersion } @@ -344,6 +351,10 @@ func (v semVersion) String() string { return str } +func (v semVersion) typedString() string { + return fmt.Sprintf("sv-%s", v.String()) +} + func (v semVersion) Type() VersionType { return IsSemver } @@ -424,6 +435,10 @@ func (v versionPair) String() string { return v.v.String() } +func (v versionPair) typedString() string { + return fmt.Sprintf("%s-%s", v.Unpair().typedString(), v.Underlying().typedString()) +} + func (v versionPair) Type() VersionType { return v.v.Type() } @@ -555,30 +570,6 @@ func compareVersionType(l, r Version) int { panic("unknown version type") } -// typedVersionString emits the normal stringified representation of the -// provided version, prefixed with a string that uniquely identifies the type of -// the version. -func typedVersionString(v Version) string { - var prefix string - switch tv := v.(type) { - case branchVersion: - prefix = "b" - case plainVersion: - prefix = "pv" - case semVersion: - prefix = "sv" - case Revision: - prefix = "r" - case versionPair: - // NOTE: The behavior suits what we want for input hashing purposes, but - // pulling out both the unpaired and underlying makes the behavior - // inconsistent with how a normal String() op works on a pairedVersion. 
- return fmt.Sprintf("%s-%s", typedVersionString(tv.Unpair()), typedVersionString(tv.Underlying())) - } - - return fmt.Sprintf("%s-%s", prefix, v.String()) -} - // SortForUpgrade sorts a slice of []Version in roughly descending order, so // that presumably newer versions are visited first. The rules are: // @@ -610,6 +601,12 @@ func SortForUpgrade(vl []Version) { sort.Sort(upgradeVersionSorter(vl)) } +// SortPairedForUpgrade has the same behavior as SortForUpgrade, but operates on +// []PairedVersion types. +func SortPairedForUpgrade(vl []PairedVersion) { + sort.Sort(pvupgradeVersionSorter(vl)) +} + // SortForDowngrade sorts a slice of []Version in roughly ascending order, so // that presumably older versions are visited first. // @@ -637,8 +634,13 @@ func SortForDowngrade(vl []Version) { sort.Sort(downgradeVersionSorter(vl)) } +// SortPairedForDowngrade has the same behavior as SortForDowngrade, but +// operates on []PairedVersion types. +func SortPairedForDowngrade(vl []PairedVersion) { + sort.Sort(pvdowngradeVersionSorter(vl)) +} + type upgradeVersionSorter []Version -type downgradeVersionSorter []Version func (vs upgradeVersionSorter) Len() int { return len(vs) @@ -648,6 +650,27 @@ func (vs upgradeVersionSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } +func (vs upgradeVersionSorter) Less(i, j int) bool { + l, r := vs[i], vs[j] + return vLess(l, r, false) +} + +type pvupgradeVersionSorter []PairedVersion + +func (vs pvupgradeVersionSorter) Len() int { + return len(vs) +} + +func (vs pvupgradeVersionSorter) Swap(i, j int) { + vs[i], vs[j] = vs[j], vs[i] +} +func (vs pvupgradeVersionSorter) Less(i, j int) bool { + l, r := vs[i], vs[j] + return vLess(l, r, false) +} + +type downgradeVersionSorter []Version + func (vs downgradeVersionSorter) Len() int { return len(vs) } @@ -656,9 +679,26 @@ func (vs downgradeVersionSorter) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] } -func (vs upgradeVersionSorter) Less(i, j int) bool { +func (vs downgradeVersionSorter) 
Less(i, j int) bool { l, r := vs[i], vs[j] + return vLess(l, r, true) +} + +type pvdowngradeVersionSorter []PairedVersion + +func (vs pvdowngradeVersionSorter) Len() int { + return len(vs) +} +func (vs pvdowngradeVersionSorter) Swap(i, j int) { + vs[i], vs[j] = vs[j], vs[i] +} +func (vs pvdowngradeVersionSorter) Less(i, j int) bool { + l, r := vs[i], vs[j] + return vLess(l, r, true) +} + +func vLess(l, r Version, down bool) bool { if tl, ispair := l.(versionPair); ispair { l = tl.v } @@ -700,52 +740,37 @@ func (vs upgradeVersionSorter) Less(i, j int) bool { if (lpre && !rpre) || (!lpre && rpre) { return lpre } + + if down { + return lsv.LessThan(rsv) + } return lsv.GreaterThan(rsv) } -func (vs downgradeVersionSorter) Less(i, j int) bool { - l, r := vs[i], vs[j] - - if tl, ispair := l.(versionPair); ispair { - l = tl.v - } - if tr, ispair := r.(versionPair); ispair { - r = tr.v +func hidePair(pvl []PairedVersion) []Version { + vl := make([]Version, 0, len(pvl)) + for _, v := range pvl { + vl = append(vl, v) } + return vl +} - switch compareVersionType(l, r) { - case -1: - return true - case 1: - return false - case 0: - break - default: - panic("unreachable") +// VersionComponentStrings decomposes a Version into the underlying number, branch and revision +func VersionComponentStrings(v Version) (revision string, branch string, version string) { + switch tv := v.(type) { + case UnpairedVersion: + case Revision: + revision = tv.String() + case PairedVersion: + revision = tv.Underlying().String() } - switch tl := l.(type) { - case branchVersion: - tr := r.(branchVersion) - if tl.isDefault != tr.isDefault { - // If they're not both defaults, then return the left val: if left - // is the default, then it is "less" (true) b/c we want it earlier. - // Else the right is the default, and so the left should be later - // (false). 
- return tl.isDefault - } - return l.String() < r.String() - case Revision, plainVersion: - // All that we can do now is alpha sort - return l.String() < r.String() + switch v.Type() { + case IsBranch: + branch = v.String() + case IsSemver, IsVersion: + version = v.String() } - // This ensures that pre-release versions are always sorted after ALL - // full-release versions - lsv, rsv := l.(semVersion).sv, r.(semVersion).sv - lpre, rpre := lsv.Prerelease() == "", rsv.Prerelease() == "" - if (lpre && !rpre) || (!lpre && rpre) { - return lpre - } - return lsv.LessThan(rsv) + return } diff --git a/vendor/github.com/sdboyer/gps/version_queue.go b/vendor/github.com/sdboyer/gps/version_queue.go index dc5da98a03..148600dce6 100644 --- a/vendor/github.com/sdboyer/gps/version_queue.go +++ b/vendor/github.com/sdboyer/gps/version_queue.go @@ -41,7 +41,7 @@ func newVersionQueue(id ProjectIdentifier, lockv, prefv Version, b sourceBridge) if len(vq.pi) == 0 { var err error - vq.pi, err = vq.b.ListVersions(vq.id) + vq.pi, err = vq.b.listVersions(vq.id) if err != nil { // TODO(sdboyer) pushing this error this early entails that we // unconditionally deep scan (e.g. vendor), as well as hitting the @@ -87,11 +87,11 @@ func (vq *versionQueue) advance(fail error) error { vq.allLoaded = true var vltmp []Version - vltmp, vq.adverr = vq.b.ListVersions(vq.id) + vltmp, vq.adverr = vq.b.listVersions(vq.id) if vq.adverr != nil { return vq.adverr } - // defensive copy - calling ListVersions here means slice contents may + // defensive copy - calling listVersions here means slice contents may // be modified when removing prefv/lockv. 
vq.pi = make([]Version, len(vltmp)) copy(vq.pi, vltmp) diff --git a/vendor/github.com/sdboyer/gps/version_queue_test.go b/vendor/github.com/sdboyer/gps/version_queue_test.go index 2abc906ac8..bdea66191b 100644 --- a/vendor/github.com/sdboyer/gps/version_queue_test.go +++ b/vendor/github.com/sdboyer/gps/version_queue_test.go @@ -23,7 +23,11 @@ func init() { SortForUpgrade(fakevl) } -func (fb *fakeBridge) ListVersions(id ProjectIdentifier) ([]Version, error) { +func (fb *fakeBridge) ListVersions(id ProjectIdentifier) ([]PairedVersion, error) { + return nil, nil +} + +func (fb *fakeBridge) listVersions(id ProjectIdentifier) ([]Version, error) { // it's a fixture, we only ever do the one, regardless of id return fb.vl, nil } @@ -34,7 +38,11 @@ type fakeFailBridge struct { var errVQ = fmt.Errorf("vqerr") -func (fb *fakeFailBridge) ListVersions(id ProjectIdentifier) ([]Version, error) { +func (fb *fakeFailBridge) ListVersions(id ProjectIdentifier) ([]PairedVersion, error) { + return nil, nil +} + +func (fb *fakeFailBridge) listVersions(id ProjectIdentifier) ([]Version, error) { return nil, errVQ } @@ -55,7 +63,7 @@ func TestVersionQueueSetup(t *testing.T) { t.Errorf("Unexpected err on vq create: %s", err) } else { if len(vq.pi) != 5 { - t.Errorf("Should have five versions from ListVersions() when providing no prefv or lockv; got %v:\n\t%s", len(vq.pi), vq.String()) + t.Errorf("Should have five versions from listVersions() when providing no prefv or lockv; got %v:\n\t%s", len(vq.pi), vq.String()) } if !vq.allLoaded { t.Errorf("allLoaded flag should be set, but wasn't") @@ -136,8 +144,7 @@ func TestVersionQueueAdvance(t *testing.T) { // First with no prefv or lockv vq, err := newVersionQueue(id, nil, nil, fb) if err != nil { - t.Errorf("Unexpected err on vq create: %s", err) - t.FailNow() + t.Fatalf("Unexpected err on vq create: %s", err) } for k, v := range fakevl[1:] { diff --git a/vendor/github.com/sdboyer/gps/version_test.go 
b/vendor/github.com/sdboyer/gps/version_test.go index 74d7610431..fe0ae77964 100644 --- a/vendor/github.com/sdboyer/gps/version_test.go +++ b/vendor/github.com/sdboyer/gps/version_test.go @@ -7,13 +7,13 @@ func TestVersionSorts(t *testing.T) { v1 := NewBranch("master").Is(rev) v2 := NewBranch("test").Is(rev) v3 := NewVersion("1.0.0").Is(rev) - v4 := NewVersion("1.0.1") - v5 := NewVersion("v2.0.5") - v6 := NewVersion("2.0.5.2") - v7 := newDefaultBranch("unwrapped") - v8 := NewVersion("20.0.5.2") - v9 := NewVersion("v1.5.5-beta.4") - v10 := NewVersion("v3.0.1-alpha.1") + v4 := NewVersion("1.0.1").Is(rev) + v5 := NewVersion("v2.0.5").Is(rev) + v6 := NewVersion("2.0.5.2").Is(rev) + v7 := newDefaultBranch("unwrapped").Is(rev) + v8 := NewVersion("20.0.5.2").Is(rev) + v9 := NewVersion("v1.5.5-beta.4").Is(rev) + v10 := NewVersion("v3.0.1-alpha.1").Is(rev) start := []Version{ v1, @@ -99,6 +99,85 @@ func TestVersionSorts(t *testing.T) { t.Errorf("Expected version %s in position %v on up-then-downgrade sort, but got %s", edown[k], k, v) } } + if len(wrong) > 0 { + // Just helps with readability a bit + t.Fatalf("Up-then-downgrade sort positions with wrong versions: %v", wrong) + } + + /////////// + // Repeat for PairedVersion slices & sorts + + pdown, pup := make([]PairedVersion, 0, len(start)), make([]PairedVersion, 0, len(start)) + for _, v := range start { + if _, ok := v.(Revision); ok { + continue + } + pdown = append(pdown, v.(PairedVersion)) + pup = append(pup, v.(PairedVersion)) + } + + pedown, peup := make([]PairedVersion, 0, len(edown)), make([]PairedVersion, 0, len(eup)) + for _, v := range edown { + if _, ok := v.(Revision); ok { + continue + } + pedown = append(pedown, v.(PairedVersion)) + } + for _, v := range eup { + if _, ok := v.(Revision); ok { + continue + } + peup = append(peup, v.(PairedVersion)) + } + + SortPairedForUpgrade(pup) + for k, v := range pup { + if peup[k] != v { + wrong = append(wrong, k) + t.Errorf("Expected version %s in 
position %v on upgrade sort, but got %s", peup[k], k, v) + } + } + if len(wrong) > 0 { + // Just helps with readability a bit + t.Errorf("Upgrade sort positions with wrong versions: %v", wrong) + } + + SortPairedForDowngrade(pdown) + wrong = wrong[:0] + for k, v := range pdown { + if pedown[k] != v { + wrong = append(wrong, k) + t.Errorf("Expected version %s in position %v on downgrade sort, but got %s", pedown[k], k, v) + } + } + if len(wrong) > 0 { + // Just helps with readability a bit + t.Errorf("Downgrade sort positions with wrong versions: %v", wrong) + } + + // Now make sure we sort back the other way correctly...just because + SortPairedForUpgrade(pdown) + wrong = wrong[:0] + for k, v := range pdown { + if peup[k] != v { + wrong = append(wrong, k) + t.Errorf("Expected version %s in position %v on down-then-upgrade sort, but got %s", peup[k], k, v) + } + } + if len(wrong) > 0 { + // Just helps with readability a bit + t.Errorf("Down-then-upgrade sort positions with wrong versions: %v", wrong) + } + + // Now make sure we sort back the other way correctly...just because + SortPairedForDowngrade(pup) + wrong = wrong[:0] + for k, v := range pup { + if pedown[k] != v { + wrong = append(wrong, k) + t.Errorf("Expected version %s in position %v on up-then-downgrade sort, but got %s", pedown[k], k, v) + } + } if len(wrong) > 0 { // Just helps with readability a bit t.Errorf("Up-then-downgrade sort positions with wrong versions: %v", wrong) diff --git a/vendor/github.com/sdboyer/gps/version_unifier.go b/vendor/github.com/sdboyer/gps/version_unifier.go new file mode 100644 index 0000000000..ceaab29f30 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/version_unifier.go @@ -0,0 +1,260 @@ +package gps + +// versionUnifier facilitates cross-type version comparison and set operations. 
+type versionUnifier struct { + b sourceBridge + mtr *metrics +} + +// pairVersion takes an UnpairedVersion and attempts to pair it with an +// underlying Revision in the context of the provided ProjectIdentifier by +// consulting the canonical version list. +func (vu versionUnifier) pairVersion(id ProjectIdentifier, v UnpairedVersion) PairedVersion { + vl, err := vu.b.listVersions(id) + if err != nil { + return nil + } + + vu.mtr.push("b-pair-version") + // doing it like this is a bit sloppy + for _, v2 := range vl { + if p, ok := v2.(PairedVersion); ok { + if p.Matches(v) { + vu.mtr.pop() + return p + } + } + } + + vu.mtr.pop() + return nil +} + +// pairRevision takes a Revision and attempts to pair it with all possible +// versions by consulting the canonical version list of the provided +// ProjectIdentifier. +func (vu versionUnifier) pairRevision(id ProjectIdentifier, r Revision) []Version { + vl, err := vu.b.listVersions(id) + if err != nil { + return nil + } + + vu.mtr.push("b-pair-rev") + p := []Version{r} + // doing it like this is a bit sloppy + for _, v2 := range vl { + if pv, ok := v2.(PairedVersion); ok { + if pv.Matches(r) { + p = append(p, pv) + } + } + } + + vu.mtr.pop() + return p +} + +// matches performs a typical match check between the provided version and +// constraint. If that basic check fails and the provided version is incomplete +// (e.g. an unpaired version or bare revision), it will attempt to gather more +// information on one or the other and re-perform the comparison. +func (vu versionUnifier) matches(id ProjectIdentifier, c Constraint, v Version) bool { + if c.Matches(v) { + return true + } + + vu.mtr.push("b-matches") + // This approach is slightly wasteful, but just SO much less verbose, and + // more easily understood.
+ vtu := vu.createTypeUnion(id, v) + + var uc Constraint + if cv, ok := c.(Version); ok { + uc = vu.createTypeUnion(id, cv) + } else { + uc = c + } + + vu.mtr.pop() + return uc.Matches(vtu) +} + +// matchesAny is the authoritative version of Constraint.MatchesAny. +func (vu versionUnifier) matchesAny(id ProjectIdentifier, c1, c2 Constraint) bool { + if c1.MatchesAny(c2) { + return true + } + + vu.mtr.push("b-matches-any") + // This approach is slightly wasteful, but just SO much less verbose, and + // more easily understood. + var uc1, uc2 Constraint + if v1, ok := c1.(Version); ok { + uc1 = vu.createTypeUnion(id, v1) + } else { + uc1 = c1 + } + + if v2, ok := c2.(Version); ok { + uc2 = vu.createTypeUnion(id, v2) + } else { + uc2 = c2 + } + + vu.mtr.pop() + return uc1.MatchesAny(uc2) +} + +// intersect is the authoritative version of Constraint.Intersect. +func (vu versionUnifier) intersect(id ProjectIdentifier, c1, c2 Constraint) Constraint { + rc := c1.Intersect(c2) + if rc != none { + return rc + } + + vu.mtr.push("b-intersect") + // This approach is slightly wasteful, but just SO much less verbose, and + // more easily understood. + var uc1, uc2 Constraint + if v1, ok := c1.(Version); ok { + uc1 = vu.createTypeUnion(id, v1) + } else { + uc1 = c1 + } + + if v2, ok := c2.(Version); ok { + uc2 = vu.createTypeUnion(id, v2) + } else { + uc2 = c2 + } + + vu.mtr.pop() + return uc1.Intersect(uc2) +} + +// createTypeUnion creates a versionTypeUnion for the provided version. +// +// This union may (and typically will) end up being nothing more than the single +// input version, but creating a versionTypeUnion guarantees that 'local' +// constraint checks (direct method calls) are authoritative. 
+func (vu versionUnifier) createTypeUnion(id ProjectIdentifier, v Version) versionTypeUnion { + switch tv := v.(type) { + case Revision: + return versionTypeUnion(vu.pairRevision(id, tv)) + case PairedVersion: + return versionTypeUnion(vu.pairRevision(id, tv.Underlying())) + case UnpairedVersion: + pv := vu.pairVersion(id, tv) + if pv == nil { + return versionTypeUnion{tv} + } + + return versionTypeUnion(vu.pairRevision(id, pv.Underlying())) + } + + return nil +} + +// versionTypeUnion represents a set of versions that are, within the scope of +// this solver run, equivalent. +// +// The simple case here is just a pair - a normal version plus its underlying +// revision - but if a tag or branch point at the same rev, then we consider +// them equivalent. Again, however, this equivalency is short-lived; it must be +// re-assessed during every solver run. +// +// The union members are treated as being OR'd together: all constraint +// operations attempt each member, and will take the most open/optimistic +// answer. +// +// This technically does allow tags to match branches - something we otherwise +// try hard to avoid - but because the original input constraint never actually +// changes (and is never written out in the Solution), there's no harmful case +// of a user suddenly riding a branch when they expected a fixed tag. +type versionTypeUnion []Version + +// This should generally not be called, but is required for the interface. If it +// is called, we have a bigger problem (the type has escaped the solver); thus, +// panic. +func (vtu versionTypeUnion) String() string { + panic("versionTypeUnion should never be turned into a string; it is solver internal-only") +} + +func (vtu versionTypeUnion) typedString() string { + panic("versionTypeUnion should never be turned into a string; it is solver internal-only") +} + +// This should generally not be called, but is required for the interface. 
If it +// is called, we have a bigger problem (the type has escaped the solver); thus, +// panic. +func (vtu versionTypeUnion) Type() VersionType { + panic("versionTypeUnion should never need to answer a Type() call; it is solver internal-only") +} + +// Matches takes a version, and returns true if that version matches any version +// contained in the union. +// +// This DOES allow tags to match branches, albeit indirectly through a revision. +func (vtu versionTypeUnion) Matches(v Version) bool { + vtu2, otherIs := v.(versionTypeUnion) + + for _, v1 := range vtu { + if otherIs { + for _, v2 := range vtu2 { + if v1.Matches(v2) { + return true + } + } + } else if v1.Matches(v) { + return true + } + } + + return false +} + +// MatchesAny returns true if any of the contained versions (which are also +// constraints) in the union successfully MatchAny with the provided +// constraint. +func (vtu versionTypeUnion) MatchesAny(c Constraint) bool { + vtu2, otherIs := c.(versionTypeUnion) + + for _, v1 := range vtu { + if otherIs { + for _, v2 := range vtu2 { + if v1.MatchesAny(v2) { + return true + } + } + } else if v1.MatchesAny(c) { + return true + } + } + + return false +} + +// Intersect takes a constraint, and attempts to intersect it with all the +// versions contained in the union until one returns non-none. If that never +// happens, then none is returned. +// +// In order to avoid weird version floating elsewhere in the solver, the union +// always returns the input constraint. (This is probably obviously correct, but +// is still worth noting.) 
+func (vtu versionTypeUnion) Intersect(c Constraint) Constraint { + vtu2, otherIs := c.(versionTypeUnion) + + for _, v1 := range vtu { + if otherIs { + for _, v2 := range vtu2 { + if rc := v1.Intersect(v2); rc != none { + return rc + } + } + } else if rc := v1.Intersect(c); rc != none { + return rc + } + } + + return none +} diff --git a/vendor/github.com/sdboyer/gps/version_unifier_test.go b/vendor/github.com/sdboyer/gps/version_unifier_test.go new file mode 100644 index 0000000000..b5893de5b4 --- /dev/null +++ b/vendor/github.com/sdboyer/gps/version_unifier_test.go @@ -0,0 +1,138 @@ +package gps + +import ( + "testing" + + "github.com/sdboyer/gps/pkgtree" +) + +type lvFixBridge []Version + +var lvfb1 lvFixBridge + +func init() { + rev1 := Revision("revision-one") + rev2 := Revision("revision-two") + rev3 := Revision("revision-three") + + lvfb1 = lvFixBridge{ + NewBranch("master").Is(rev1), + NewBranch("test").Is(rev2), + NewVersion("1.0.0").Is(rev1), + NewVersion("1.0.1").Is("other1"), + NewVersion("v2.0.5").Is(rev3), + NewVersion("2.0.5.2").Is(rev3), + newDefaultBranch("unwrapped").Is(rev3), + NewVersion("20.0.5.2").Is(rev1), + NewVersion("v1.5.5-beta.4").Is("other2"), + NewVersion("v3.0.1-alpha.1").Is(rev2), + } +} + +func (lb lvFixBridge) listVersions(ProjectIdentifier) ([]Version, error) { + return lb, nil +} + +func TestCreateTyepUnion(t *testing.T) { + vu := versionUnifier{ + b: lvfb1, + mtr: newMetrics(), + } + + rev1 := Revision("revision-one") + rev2 := Revision("revision-two") + id := mkPI("irrelevant") + + vtu := vu.createTypeUnion(id, rev1) + if len(vtu) != 4 { + t.Fatalf("wanted a type union with four elements, got %v: \n%#v", len(vtu), vtu) + } + + vtu = vu.createTypeUnion(id, NewBranch("master")) + if len(vtu) != 4 { + t.Fatalf("wanted a type union with four elements, got %v: \n%#v", len(vtu), vtu) + } + + vtu = vu.createTypeUnion(id, Revision("notexist")) + if len(vtu) != 1 { + t.Fatalf("wanted a type union with one elements, 
got %v: \n%#v", len(vtu), vtu) + } + + vtu = vu.createTypeUnion(id, rev2) + if len(vtu) != 3 { + t.Fatalf("wanted a type union with three elements, got %v: \n%#v", len(vtu), vtu) + } + + vtu = vu.createTypeUnion(id, nil) + if vtu != nil { + t.Fatalf("wanted a nil return on nil input, got %#v", vtu) + } +} + +func TestTypeUnionIntersect(t *testing.T) { + vu := versionUnifier{ + b: lvfb1, + mtr: newMetrics(), + } + + rev1 := Revision("revision-one") + rev2 := Revision("revision-two") + rev3 := Revision("revision-three") + id := mkPI("irrelevant") + + c, _ := NewSemverConstraint("^2.0.0") + gotc := vu.intersect(id, rev2, c) + if gotc != none { + t.Fatalf("wanted empty set from intersect, got %#v", gotc) + } + + gotc = vu.intersect(id, c, rev1) + if gotc != none { + t.Fatalf("wanted empty set from intersect, got %#v", gotc) + } + + gotc = vu.intersect(id, c, rev3) + if gotc != NewVersion("v2.0.5").Is(rev3) { + t.Fatalf("wanted v2.0.5, got %s from intersect", gotc.typedString()) + } +} + +func (lb lvFixBridge) SourceExists(ProjectIdentifier) (bool, error) { + panic("not implemented") +} + +func (lb lvFixBridge) SyncSourceFor(ProjectIdentifier) error { + panic("not implemented") +} + +func (lb lvFixBridge) RevisionPresentIn(ProjectIdentifier, Revision) (bool, error) { + panic("not implemented") +} + +func (lb lvFixBridge) ListPackages(ProjectIdentifier, Version) (pkgtree.PackageTree, error) { + panic("not implemented") +} + +func (lb lvFixBridge) GetManifestAndLock(ProjectIdentifier, Version, ProjectAnalyzer) (Manifest, Lock, error) { + panic("not implemented") +} + +func (lb lvFixBridge) ExportProject(ProjectIdentifier, Version, string) error { + panic("not implemented") +} + +func (lb lvFixBridge) DeduceProjectRoot(ip string) (ProjectRoot, error) { + panic("not implemented") +} + +func (lb lvFixBridge) verifyRootDir(path string) error { + panic("not implemented") +} + +func (lb lvFixBridge) vendorCodeExists(ProjectIdentifier) (bool, error) { + panic("not implemented") 
+} + +func (lb lvFixBridge) breakLock() { + panic("not implemented") +} From 8438fd4bbce884b380119d9b690f2d21d3a574ad Mon Sep 17 00:00:00 2001 From: sam boyer Date: Sat, 15 Apr 2017 01:06:44 -0400 Subject: [PATCH 2/3] Chase gps' API changes --- cmd/dep/ensure.go | 5 +---- cmd/dep/init.go | 1 + cmd/dep/status.go | 11 ++--------- context.go | 2 +- project.go | 3 ++- 5 files changed, 7 insertions(+), 15 deletions(-) diff --git a/cmd/dep/ensure.go b/cmd/dep/ensure.go index 1b840da1ce..6ed07aca0f 100644 --- a/cmd/dep/ensure.go +++ b/cmd/dep/ensure.go @@ -309,11 +309,8 @@ func getProjectConstraint(arg string, sm *gps.SourceMgr) (gps.ProjectConstraint, var found bool for _, version := range versions { if versionStr == version.String() { - if pv, ok := version.(gps.PairedVersion); ok { - version = pv.Unpair() - } found = true - constraint.Constraint = version + constraint.Constraint = version.Unpair() break } } diff --git a/cmd/dep/init.go b/cmd/dep/init.go index f9b162d4ba..a74f480422 100644 --- a/cmd/dep/init.go +++ b/cmd/dep/init.go @@ -141,6 +141,7 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error { RootPackageTree: pkgT, Manifest: m, Lock: l, + ProjectAnalyzer: analyzer{}, } if *verbose { diff --git a/cmd/dep/status.go b/cmd/dep/status.go index b81f18253a..614827177c 100644 --- a/cmd/dep/status.go +++ b/cmd/dep/status.go @@ -336,21 +336,14 @@ func runStatusAll(out outputter, p *dep.Project, sm *gps.SourceMgr) error { vl, err := sm.ListVersions(proj.Ident()) if err == nil { - gps.SortForUpgrade(vl) + gps.SortPairedForUpgrade(vl) for _, v := range vl { // Because we've sorted the version list for // upgrade, the first version we encounter that // matches our constraint will be what we want. if c.Constraint.Matches(v) { - // For branch constraints this should be the - // most recent revision on the selected - // branch. 
- if tv, ok := v.(gps.PairedVersion); ok && v.Type() == gps.IsBranch { - bs.Latest = tv.Underlying() - } else { - bs.Latest = v - } + bs.Latest = v.Underlying() break } } diff --git a/context.go b/context.go index ca86495b74..5f7efbb90c 100644 --- a/context.go +++ b/context.go @@ -52,7 +52,7 @@ func NewContext() (*Ctx, error) { } func (c *Ctx) SourceManager() (*gps.SourceMgr, error) { - return gps.NewSourceManager(analyzer{}, filepath.Join(c.GOPATH, "pkg", "dep")) + return gps.NewSourceManager(filepath.Join(c.GOPATH, "pkg", "dep")) } // LoadProject takes a path and searches up the directory tree for diff --git a/project.go b/project.go index d2e51e8d5d..7031732a65 100644 --- a/project.go +++ b/project.go @@ -59,7 +59,8 @@ type Project struct { // any nils incorrectly. func (p *Project) MakeParams() gps.SolveParameters { params := gps.SolveParameters{ - RootDir: p.AbsRoot, + RootDir: p.AbsRoot, + ProjectAnalyzer: analyzer{}, } if p.Manifest != nil { From 00a5b76e3610c146f449be0011ca0cabda5baafc Mon Sep 17 00:00:00 2001 From: sam boyer Date: Sat, 15 Apr 2017 01:23:16 -0400 Subject: [PATCH 3/3] Whoops, need to export Analyzer now --- analyzer.go | 6 +++--- analyzer_test.go | 4 ++-- cmd/dep/init.go | 2 +- project.go | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/analyzer.go b/analyzer.go index aed9aa9305..ffd338c974 100644 --- a/analyzer.go +++ b/analyzer.go @@ -11,9 +11,9 @@ import ( "github.com/sdboyer/gps" ) -type analyzer struct{} +type Analyzer struct{} -func (a analyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) { +func (a Analyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Manifest, gps.Lock, error) { // TODO: If we decide to support other tools manifest, this is where we would need // to add that support. 
mf := filepath.Join(path, ManifestName) @@ -36,6 +36,6 @@ func (a analyzer) DeriveManifestAndLock(path string, n gps.ProjectRoot) (gps.Man return m, nil, nil } -func (a analyzer) Info() (string, int) { +func (a Analyzer) Info() (string, int) { return "dep", 1 } diff --git a/analyzer_test.go b/analyzer_test.go index 0d3f4a6a82..ff53a1c68f 100644 --- a/analyzer_test.go +++ b/analyzer_test.go @@ -22,7 +22,7 @@ func TestDeriveManifestAndLock(t *testing.T) { want := h.GetTestFileString(golden) h.TempCopy(filepath.Join("dep", ManifestName), golden) - a := analyzer{} + a := Analyzer{} m, l, err := a.DeriveManifestAndLock(h.Path("dep"), "my/fake/project") if err != nil { @@ -56,7 +56,7 @@ func TestDeriveManifestAndLockDoesNotExist(t *testing.T) { } defer os.RemoveAll(dir) - a := analyzer{} + a := Analyzer{} m, l, err := a.DeriveManifestAndLock(dir, "my/fake/project") if m != nil || l != nil || err != nil { diff --git a/cmd/dep/init.go b/cmd/dep/init.go index a74f480422..7609aca265 100644 --- a/cmd/dep/init.go +++ b/cmd/dep/init.go @@ -141,7 +141,7 @@ func (cmd *initCommand) Run(ctx *dep.Ctx, args []string) error { RootPackageTree: pkgT, Manifest: m, Lock: l, - ProjectAnalyzer: analyzer{}, + ProjectAnalyzer: dep.Analyzer{}, } if *verbose { diff --git a/project.go b/project.go index 7031732a65..eaea2ea534 100644 --- a/project.go +++ b/project.go @@ -60,7 +60,7 @@ type Project struct { func (p *Project) MakeParams() gps.SolveParameters { params := gps.SolveParameters{ RootDir: p.AbsRoot, - ProjectAnalyzer: analyzer{}, + ProjectAnalyzer: Analyzer{}, } if p.Manifest != nil {