From 671604143f83c734c82946b424d00805ef0feb02 Mon Sep 17 00:00:00 2001 From: Bill Farner Date: Tue, 15 Nov 2016 07:32:49 -0800 Subject: [PATCH 1/2] Use JSON-RPC over HTTP for communication between plugins Signed-off-by: Bill Farner --- cli/serverutil.go | 9 +- cmd/cli/flavor.go | 5 +- cmd/cli/group.go | 5 +- cmd/cli/instance.go | 5 +- cmd/group/main.go | 4 +- cmd/manager/main.go | 17 +- discovery/dir_test.go | 20 +-- example/flavor/combo/main.go | 2 +- leader/swarm/swarm_test.go | 6 +- manager/group_plugin_impl.go | 7 +- manager/manager.go | 2 +- mock/docker/docker/client/api.go | 257 +++++++++++++++++++++++++++-- plugin/flavor/swarm/flavor.go | 4 +- plugin/flavor/swarm/flavor_test.go | 16 +- rpc/client/client.go | 39 +++++ rpc/flavor/client.go | 47 +++--- rpc/flavor/rpc_test.go | 56 +++---- rpc/flavor/service.go | 19 ++- rpc/flavor/types.go | 25 +-- rpc/group/client.go | 55 +++--- rpc/group/rpc_test.go | 58 +++---- rpc/group/service.go | 13 +- rpc/group/types.go | 10 -- rpc/instance/client.go | 55 +++--- rpc/instance/rpc_test.go | 69 ++++---- rpc/instance/service.go | 18 +- rpc/instance/types.go | 18 +- rpc/server.go | 161 ------------------ rpc/server/server.go | 68 ++++++++ rpc/{ => server}/server_test.go | 13 +- vendor.conf | 42 +++-- 31 files changed, 597 insertions(+), 528 deletions(-) create mode 100644 rpc/client/client.go delete mode 100644 rpc/server.go create mode 100644 rpc/server/server.go rename rpc/{ => server}/server_test.go (84%) diff --git a/cli/serverutil.go b/cli/serverutil.go index 3d5ce4feb..436c82040 100644 --- a/cli/serverutil.go +++ b/cli/serverutil.go @@ -5,16 +5,15 @@ import ( log "github.com/Sirupsen/logrus" "github.com/docker/infrakit/discovery" - "github.com/docker/infrakit/rpc" + "github.com/docker/infrakit/rpc/server" ) // RunPlugin runs a plugin server, advertising with the provided name for discovery. -// THe plugin should conform to the rpc call convention as implemented in the rpc package. +// The plugin should conform to the rpc call convention as implemented in the rpc package. 
func RunPlugin(name string, plugin interface{}) { - _, stopped, err := rpc.StartPluginAtPath(path.Join(discovery.Dir(), name), plugin) + stoppable, err := server.StartPluginAtPath(path.Join(discovery.Dir(), name), plugin) if err != nil { log.Error(err) } - - <-stopped // block until done + stoppable.AwaitStopped() } diff --git a/cmd/cli/flavor.go b/cmd/cli/flavor.go index d8e15096b..5895f5dfa 100644 --- a/cmd/cli/flavor.go +++ b/cmd/cli/flavor.go @@ -31,10 +31,7 @@ func flavorPluginCommand(plugins func() discovery.Plugins) *cobra.Command { return err } - flavorPlugin, err = flavor_plugin.NewClient(endpoint.Protocol, endpoint.Address) - if err != nil { - return err - } + flavorPlugin = flavor_plugin.NewClient(endpoint.Address) return nil }, diff --git a/cmd/cli/group.go b/cmd/cli/group.go index 30ab24429..d7befa9e0 100644 --- a/cmd/cli/group.go +++ b/cmd/cli/group.go @@ -35,10 +35,7 @@ func groupPluginCommand(plugins func() discovery.Plugins) *cobra.Command { return err } - groupPlugin, err = group_plugin.NewClient(endpoint.Protocol, endpoint.Address) - if err != nil { - return err - } + groupPlugin = group_plugin.NewClient(endpoint.Address) return nil }, diff --git a/cmd/cli/instance.go b/cmd/cli/instance.go index d2f9a0031..887adcd44 100644 --- a/cmd/cli/instance.go +++ b/cmd/cli/instance.go @@ -30,10 +30,7 @@ func instancePluginCommand(plugins func() discovery.Plugins) *cobra.Command { return err } - instancePlugin, err = instance_plugin.NewClient(endpoint.Protocol, endpoint.Address) - if err != nil { - return err - } + instancePlugin = instance_plugin.NewClient(endpoint.Address) return nil }, diff --git a/cmd/group/main.go b/cmd/group/main.go index 5cdb72643..9574774ca 100644 --- a/cmd/group/main.go +++ b/cmd/group/main.go @@ -40,7 +40,7 @@ func main() { if err != nil { return nil, err } - return instance_client.NewClient(endpoint.Protocol, endpoint.Address) + return instance_client.NewClient(endpoint.Address), nil } flavorPluginLookup := func(n string) (flavor.Plugin, error) { @@ -48,7 +48,7 @@ func main() { if err != nil { return nil, err } - return flavor_client.NewClient(endpoint.Protocol, endpoint.Address) + return flavor_client.NewClient(endpoint.Address), nil } cli.RunPlugin(name, group_server.PluginServer( diff --git a/cmd/manager/main.go b/cmd/manager/main.go index 691484a6b..cf020d900 100644 --- a/cmd/manager/main.go +++ b/cmd/manager/main.go @@ -9,7 +9,6 @@ import ( "github.com/docker/infrakit/discovery" "github.com/docker/infrakit/leader" "github.com/docker/infrakit/manager" - "github.com/docker/infrakit/rpc" group_rpc "github.com/docker/infrakit/rpc/group" "github.com/docker/infrakit/store" "github.com/spf13/cobra" @@ -52,28 +51,20 @@ func runMain(backend *backend) error { log.Infoln("Starting up manager:", backend) - manager, err := manager.NewManager(backend.plugins, + mgr, err := manager.NewManager(backend.plugins, backend.leader, backend.snapshot, backend.pluginName) if err != nil { return err } - _, err = manager.Start() + _, err = mgr.Start() if err != nil { return err } - _, stopped, err := rpc.StartPluginAtPath( - filepath.Join(discovery.Dir(), backend.id), - group_rpc.PluginServer(manager), - ) - if err != nil { - return err - } - - <-stopped // block until done + cli.RunPlugin(backend.id, group_rpc.PluginServer(mgr)) - manager.Stop() + mgr.Stop() log.Infoln("Manager stopped") return err diff --git a/discovery/dir_test.go b/discovery/dir_test.go index f1a41bf07..e78aefb1e 100644 --- a/discovery/dir_test.go +++ 
b/discovery/dir_test.go @@ -7,8 +7,8 @@ import ( "testing" "time" - server "github.com/docker/infrakit/rpc" rpc "github.com/docker/infrakit/rpc/instance" + "github.com/docker/infrakit/rpc/server" "github.com/stretchr/testify/require" ) @@ -31,17 +31,15 @@ func TestDirDiscovery(t *testing.T) { name1 := "server1" path1 := filepath.Join(dir, name1) - stop1, errors1, err1 := server.StartPluginAtPath(path1, rpc.PluginServer(nil)) - require.NoError(t, err1) - require.NotNil(t, stop1) - require.NotNil(t, errors1) + server1, err := server.StartPluginAtPath(path1, rpc.PluginServer(nil)) + require.NoError(t, err) + require.NotNil(t, server1) name2 := "server2" path2 := filepath.Join(dir, name2) - stop2, errors2, err2 := server.StartPluginAtPath(path2, rpc.PluginServer(nil)) - require.NoError(t, err2) - require.NotNil(t, stop2) - require.NotNil(t, errors2) + server2, err := server.StartPluginAtPath(path2, rpc.PluginServer(nil)) + require.NoError(t, err) + require.NotNil(t, server2) discover, err := newDirPluginDiscovery(dir) require.NoError(t, err) @@ -55,7 +53,7 @@ func TestDirDiscovery(t *testing.T) { require.NotNil(t, p) // Now we stop the servers - close(stop1) + server1.Stop() blockWhileFileExists(path1) p, err = discover.Find(name1) @@ -65,7 +63,7 @@ func TestDirDiscovery(t *testing.T) { require.NoError(t, err) require.NotNil(t, p) - close(stop2) + server2.Stop() blockWhileFileExists(path2) diff --git a/example/flavor/combo/main.go b/example/flavor/combo/main.go index cfe1e64b0..ef269d195 100644 --- a/example/flavor/combo/main.go +++ b/example/flavor/combo/main.go @@ -32,7 +32,7 @@ func main() { if err != nil { return nil, err } - return flavor_rpc.NewClient(endpoint.Protocol, endpoint.Address) + return flavor_rpc.NewClient(endpoint.Address), nil } cli.SetLogLevel(logLevel) diff --git a/leader/swarm/swarm_test.go b/leader/swarm/swarm_test.go index e4eca2d51..9952bce89 100644 --- a/leader/swarm/swarm_test.go +++ b/leader/swarm/swarm_test.go @@ -19,8 +19,10 @@ func TestSwarmDetector(t *testing.T) { ctx := context.Background() nodeInfo := types.Info{ - Swarm: swarm.Info{ - NodeID: "node", + InfoBase: &types.InfoBase{ + Swarm: swarm.Info{ + NodeID: "node", + }, }, } node := swarm.Node{ diff --git a/manager/group_plugin_impl.go b/manager/group_plugin_impl.go index e17b0b866..05831801c 100644 --- a/manager/group_plugin_impl.go +++ b/manager/group_plugin_impl.go @@ -18,13 +18,8 @@ func (m *manager) proxyForGroupPlugin(name string) (group.Plugin, error) { return nil, err } - client, err := rpc.NewClient(endpoint.Protocol, endpoint.Address) - if err != nil { - return nil, err - } - m.backendName = name - return client, nil + return rpc.NewClient(endpoint.Address), nil } // This implements the Group Plugin interface to support single group-only operations diff --git a/manager/manager.go b/manager/manager.go index 209ba9cb1..ff647e9fb 100644 --- a/manager/manager.go +++ b/manager/manager.go @@ -326,7 +326,7 @@ func (m *manager) execPlugins(config GlobalSpec, work func(group.Plugin, group.S return err } - gp, err := rpc.NewClient(ep.Protocol, ep.Address) + gp := rpc.NewClient(ep.Address) if err != nil { log.Warningln("Cannot contact group", id, " at plugin", name, "endpoint=", ep.Address) return err diff --git a/mock/docker/docker/client/api.go b/mock/docker/docker/client/api.go index 328bbd9e0..f2f2707fc 100644 --- a/mock/docker/docker/client/api.go +++ b/mock/docker/docker/client/api.go @@ -11,6 +11,7 @@ import ( network "github.com/docker/docker/api/types/network" registry 
"github.com/docker/docker/api/types/registry" swarm "github.com/docker/docker/api/types/swarm" + volume "github.com/docker/docker/api/types/volume" gomock "github.com/golang/mock/gomock" context "golang.org/x/net/context" io "io" @@ -38,6 +39,37 @@ func (_m *MockAPIClient) EXPECT() *_MockAPIClientRecorder { return _m.recorder } +func (_m *MockAPIClient) CheckpointCreate(_param0 context.Context, _param1 string, _param2 types.CheckpointCreateOptions) error { + ret := _m.ctrl.Call(_m, "CheckpointCreate", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) CheckpointCreate(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CheckpointCreate", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) CheckpointDelete(_param0 context.Context, _param1 string, _param2 types.CheckpointDeleteOptions) error { + ret := _m.ctrl.Call(_m, "CheckpointDelete", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) CheckpointDelete(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CheckpointDelete", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) CheckpointList(_param0 context.Context, _param1 string, _param2 types.CheckpointListOptions) ([]types.Checkpoint, error) { + ret := _m.ctrl.Call(_m, "CheckpointList", _param0, _param1, _param2) + ret0, _ := ret[0].([]types.Checkpoint) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) CheckpointList(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "CheckpointList", arg0, arg1, arg2) +} + func (_m *MockAPIClient) ClientVersion() string { ret := _m.ctrl.Call(_m, "ClientVersion") ret0, _ := ret[0].(string) @@ -59,9 +91,9 @@ func (_mr *_MockAPIClientRecorder) ContainerAttach(arg0, arg1, arg2 interface{}) return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerAttach", arg0, arg1, arg2) } -func (_m *MockAPIClient) ContainerCommit(_param0 context.Context, _param1 string, _param2 types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { +func (_m *MockAPIClient) ContainerCommit(_param0 context.Context, _param1 string, _param2 types.ContainerCommitOptions) (types.IDResponse, error) { ret := _m.ctrl.Call(_m, "ContainerCommit", _param0, _param1, _param2) - ret0, _ := ret[0].(types.ContainerCommitResponse) + ret0, _ := ret[0].(types.IDResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -70,9 +102,9 @@ func (_mr *_MockAPIClientRecorder) ContainerCommit(arg0, arg1, arg2 interface{}) return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerCommit", arg0, arg1, arg2) } -func (_m *MockAPIClient) ContainerCreate(_param0 context.Context, _param1 *container.Config, _param2 *container.HostConfig, _param3 *network.NetworkingConfig, _param4 string) (types.ContainerCreateResponse, error) { +func (_m *MockAPIClient) ContainerCreate(_param0 context.Context, _param1 *container.Config, _param2 *container.HostConfig, _param3 *network.NetworkingConfig, _param4 string) (container.ContainerCreateCreatedBody, error) { ret := _m.ctrl.Call(_m, "ContainerCreate", _param0, _param1, _param2, _param3, _param4) - ret0, _ := ret[0].(types.ContainerCreateResponse) + ret0, _ := ret[0].(container.ContainerCreateCreatedBody) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -103,9 +135,9 @@ func (_mr *_MockAPIClientRecorder) ContainerExecAttach(arg0, arg1, arg2 interfac return _mr.mock.ctrl.RecordCall(_mr.mock, 
"ContainerExecAttach", arg0, arg1, arg2) } -func (_m *MockAPIClient) ContainerExecCreate(_param0 context.Context, _param1 string, _param2 types.ExecConfig) (types.ContainerExecCreateResponse, error) { +func (_m *MockAPIClient) ContainerExecCreate(_param0 context.Context, _param1 string, _param2 types.ExecConfig) (types.IDResponse, error) { ret := _m.ctrl.Call(_m, "ContainerExecCreate", _param0, _param1, _param2) - ret0, _ := ret[0].(types.ContainerExecCreateResponse) + ret0, _ := ret[0].(types.IDResponse) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -324,9 +356,9 @@ func (_mr *_MockAPIClientRecorder) ContainerUnpause(arg0, arg1 interface{}) *gom return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerUnpause", arg0, arg1) } -func (_m *MockAPIClient) ContainerUpdate(_param0 context.Context, _param1 string, _param2 container.UpdateConfig) (types.ContainerUpdateResponse, error) { +func (_m *MockAPIClient) ContainerUpdate(_param0 context.Context, _param1 string, _param2 container.UpdateConfig) (container.ContainerUpdateOKBody, error) { ret := _m.ctrl.Call(_m, "ContainerUpdate", _param0, _param1, _param2) - ret0, _ := ret[0].(types.ContainerUpdateResponse) + ret0, _ := ret[0].(container.ContainerUpdateOKBody) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -335,9 +367,9 @@ func (_mr *_MockAPIClientRecorder) ContainerUpdate(arg0, arg1, arg2 interface{}) return _mr.mock.ctrl.RecordCall(_mr.mock, "ContainerUpdate", arg0, arg1, arg2) } -func (_m *MockAPIClient) ContainerWait(_param0 context.Context, _param1 string) (int, error) { +func (_m *MockAPIClient) ContainerWait(_param0 context.Context, _param1 string) (int64, error) { ret := _m.ctrl.Call(_m, "ContainerWait", _param0, _param1) - ret0, _ := ret[0].(int) + ret0, _ := ret[0].(int64) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -457,9 +489,9 @@ func (_mr *_MockAPIClientRecorder) ImageInspectWithRaw(arg0, arg1 interface{}) * return _mr.mock.ctrl.RecordCall(_mr.mock, "ImageInspectWithRaw", arg0, arg1) } -func (_m *MockAPIClient) ImageList(_param0 context.Context, _param1 types.ImageListOptions) ([]types.Image, error) { +func (_m *MockAPIClient) ImageList(_param0 context.Context, _param1 types.ImageListOptions) ([]types.ImageSummary, error) { ret := _m.ctrl.Call(_m, "ImageList", _param0, _param1) - ret0, _ := ret[0].([]types.Image) + ret0, _ := ret[0].([]types.ImageSummary) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -641,6 +673,17 @@ func (_mr *_MockAPIClientRecorder) NetworkRemove(arg0, arg1 interface{}) *gomock return _mr.mock.ctrl.RecordCall(_mr.mock, "NetworkRemove", arg0, arg1) } +func (_m *MockAPIClient) NetworksPrune(_param0 context.Context, _param1 types.NetworksPruneConfig) (types.NetworksPruneReport, error) { + ret := _m.ctrl.Call(_m, "NetworksPrune", _param0, _param1) + ret0, _ := ret[0].(types.NetworksPruneReport) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) NetworksPrune(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "NetworksPrune", arg0, arg1) +} + func (_m *MockAPIClient) NodeInspectWithRaw(_param0 context.Context, _param1 string) (swarm.Node, []byte, error) { ret := _m.ctrl.Call(_m, "NodeInspectWithRaw", _param0, _param1) ret0, _ := ret[0].(swarm.Node) @@ -684,9 +727,113 @@ func (_mr *_MockAPIClientRecorder) NodeUpdate(arg0, arg1, arg2, arg3 interface{} return _mr.mock.ctrl.RecordCall(_mr.mock, "NodeUpdate", arg0, arg1, arg2, arg3) } -func (_m *MockAPIClient) RegistryLogin(_param0 context.Context, _param1 types.AuthConfig) 
(types.AuthResponse, error) { +func (_m *MockAPIClient) Ping(_param0 context.Context) (types.Ping, error) { + ret := _m.ctrl.Call(_m, "Ping", _param0) + ret0, _ := ret[0].(types.Ping) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) Ping(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "Ping", arg0) +} + +func (_m *MockAPIClient) PluginCreate(_param0 context.Context, _param1 io.Reader, _param2 types.PluginCreateOptions) error { + ret := _m.ctrl.Call(_m, "PluginCreate", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginCreate(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginCreate", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) PluginDisable(_param0 context.Context, _param1 string) error { + ret := _m.ctrl.Call(_m, "PluginDisable", _param0, _param1) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginDisable(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginDisable", arg0, arg1) +} + +func (_m *MockAPIClient) PluginEnable(_param0 context.Context, _param1 string) error { + ret := _m.ctrl.Call(_m, "PluginEnable", _param0, _param1) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginEnable(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginEnable", arg0, arg1) +} + +func (_m *MockAPIClient) PluginInspectWithRaw(_param0 context.Context, _param1 string) (*types.Plugin, []byte, error) { + ret := _m.ctrl.Call(_m, "PluginInspectWithRaw", _param0, _param1) + ret0, _ := ret[0].(*types.Plugin) + ret1, _ := ret[1].([]byte) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +func (_mr *_MockAPIClientRecorder) PluginInspectWithRaw(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginInspectWithRaw", arg0, arg1) +} + +func (_m *MockAPIClient) PluginInstall(_param0 context.Context, _param1 string, _param2 types.PluginInstallOptions) error { + ret := _m.ctrl.Call(_m, "PluginInstall", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginInstall(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginInstall", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) PluginList(_param0 context.Context) (types.PluginsListResponse, error) { + ret := _m.ctrl.Call(_m, "PluginList", _param0) + ret0, _ := ret[0].(types.PluginsListResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) PluginList(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginList", arg0) +} + +func (_m *MockAPIClient) PluginPush(_param0 context.Context, _param1 string, _param2 string) error { + ret := _m.ctrl.Call(_m, "PluginPush", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginPush(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginPush", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) PluginRemove(_param0 context.Context, _param1 string, _param2 types.PluginRemoveOptions) error { + ret := _m.ctrl.Call(_m, "PluginRemove", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginRemove(arg0, arg1, arg2 interface{}) *gomock.Call { + 
return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginRemove", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) PluginSet(_param0 context.Context, _param1 string, _param2 []string) error { + ret := _m.ctrl.Call(_m, "PluginSet", _param0, _param1, _param2) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) PluginSet(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "PluginSet", arg0, arg1, arg2) +} + +func (_m *MockAPIClient) RegistryLogin(_param0 context.Context, _param1 types.AuthConfig) (registry.AuthenticateOKBody, error) { ret := _m.ctrl.Call(_m, "RegistryLogin", _param0, _param1) - ret0, _ := ret[0].(types.AuthResponse) + ret0, _ := ret[0].(registry.AuthenticateOKBody) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -695,6 +842,50 @@ func (_mr *_MockAPIClientRecorder) RegistryLogin(arg0, arg1 interface{}) *gomock return _mr.mock.ctrl.RecordCall(_mr.mock, "RegistryLogin", arg0, arg1) } +func (_m *MockAPIClient) SecretCreate(_param0 context.Context, _param1 swarm.SecretSpec) (types.SecretCreateResponse, error) { + ret := _m.ctrl.Call(_m, "SecretCreate", _param0, _param1) + ret0, _ := ret[0].(types.SecretCreateResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) SecretCreate(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SecretCreate", arg0, arg1) +} + +func (_m *MockAPIClient) SecretInspectWithRaw(_param0 context.Context, _param1 string) (swarm.Secret, []byte, error) { + ret := _m.ctrl.Call(_m, "SecretInspectWithRaw", _param0, _param1) + ret0, _ := ret[0].(swarm.Secret) + ret1, _ := ret[1].([]byte) + ret2, _ := ret[2].(error) + return ret0, ret1, ret2 +} + +func (_mr *_MockAPIClientRecorder) SecretInspectWithRaw(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SecretInspectWithRaw", arg0, arg1) +} + +func (_m *MockAPIClient) SecretList(_param0 context.Context, _param1 types.SecretListOptions) ([]swarm.Secret, error) { + ret := _m.ctrl.Call(_m, "SecretList", _param0, _param1) + ret0, _ := ret[0].([]swarm.Secret) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) SecretList(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SecretList", arg0, arg1) +} + +func (_m *MockAPIClient) SecretRemove(_param0 context.Context, _param1 string) error { + ret := _m.ctrl.Call(_m, "SecretRemove", _param0, _param1) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) SecretRemove(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SecretRemove", arg0, arg1) +} + func (_m *MockAPIClient) ServerVersion(_param0 context.Context) (types.Version, error) { ret := _m.ctrl.Call(_m, "ServerVersion", _param0) ret0, _ := ret[0].(types.Version) @@ -740,6 +931,17 @@ func (_mr *_MockAPIClientRecorder) ServiceList(arg0, arg1 interface{}) *gomock.C return _mr.mock.ctrl.RecordCall(_mr.mock, "ServiceList", arg0, arg1) } +func (_m *MockAPIClient) ServiceLogs(_param0 context.Context, _param1 string, _param2 types.ContainerLogsOptions) (io.ReadCloser, error) { + ret := _m.ctrl.Call(_m, "ServiceLogs", _param0, _param1, _param2) + ret0, _ := ret[0].(io.ReadCloser) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) ServiceLogs(arg0, arg1, arg2 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "ServiceLogs", arg0, arg1, arg2) +} + func (_m *MockAPIClient) 
ServiceRemove(_param0 context.Context, _param1 string) error { ret := _m.ctrl.Call(_m, "ServiceRemove", _param0, _param1) ret0, _ := ret[0].(error) @@ -760,6 +962,17 @@ func (_mr *_MockAPIClientRecorder) ServiceUpdate(arg0, arg1, arg2, arg3, arg4 in return _mr.mock.ctrl.RecordCall(_mr.mock, "ServiceUpdate", arg0, arg1, arg2, arg3, arg4) } +func (_m *MockAPIClient) SwarmGetUnlockKey(_param0 context.Context) (types.SwarmUnlockKeyResponse, error) { + ret := _m.ctrl.Call(_m, "SwarmGetUnlockKey", _param0) + ret0, _ := ret[0].(types.SwarmUnlockKeyResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +func (_mr *_MockAPIClientRecorder) SwarmGetUnlockKey(arg0 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SwarmGetUnlockKey", arg0) +} + func (_m *MockAPIClient) SwarmInit(_param0 context.Context, _param1 swarm.InitRequest) (string, error) { ret := _m.ctrl.Call(_m, "SwarmInit", _param0, _param1) ret0, _ := ret[0].(string) @@ -802,6 +1015,16 @@ func (_mr *_MockAPIClientRecorder) SwarmLeave(arg0, arg1 interface{}) *gomock.Ca return _mr.mock.ctrl.RecordCall(_mr.mock, "SwarmLeave", arg0, arg1) } +func (_m *MockAPIClient) SwarmUnlock(_param0 context.Context, _param1 swarm.UnlockRequest) error { + ret := _m.ctrl.Call(_m, "SwarmUnlock", _param0, _param1) + ret0, _ := ret[0].(error) + return ret0 +} + +func (_mr *_MockAPIClientRecorder) SwarmUnlock(arg0, arg1 interface{}) *gomock.Call { + return _mr.mock.ctrl.RecordCall(_mr.mock, "SwarmUnlock", arg0, arg1) +} + func (_m *MockAPIClient) SwarmUpdate(_param0 context.Context, _param1 swarm.Version, _param2 swarm.Spec, _param3 swarm.UpdateFlags) error { ret := _m.ctrl.Call(_m, "SwarmUpdate", _param0, _param1, _param2, _param3) ret0, _ := ret[0].(error) @@ -843,7 +1066,7 @@ func (_mr *_MockAPIClientRecorder) UpdateClientVersion(arg0 interface{}) *gomock return _mr.mock.ctrl.RecordCall(_mr.mock, "UpdateClientVersion", arg0) } -func (_m *MockAPIClient) VolumeCreate(_param0 context.Context, _param1 types.VolumeCreateRequest) (types.Volume, error) { +func (_m *MockAPIClient) VolumeCreate(_param0 context.Context, _param1 volume.VolumesCreateBody) (types.Volume, error) { ret := _m.ctrl.Call(_m, "VolumeCreate", _param0, _param1) ret0, _ := ret[0].(types.Volume) ret1, _ := ret[1].(error) @@ -877,9 +1100,9 @@ func (_mr *_MockAPIClientRecorder) VolumeInspectWithRaw(arg0, arg1 interface{}) return _mr.mock.ctrl.RecordCall(_mr.mock, "VolumeInspectWithRaw", arg0, arg1) } -func (_m *MockAPIClient) VolumeList(_param0 context.Context, _param1 filters.Args) (types.VolumesListResponse, error) { +func (_m *MockAPIClient) VolumeList(_param0 context.Context, _param1 filters.Args) (volume.VolumesListOKBody, error) { ret := _m.ctrl.Call(_m, "VolumeList", _param0, _param1) - ret0, _ := ret[0].(types.VolumesListResponse) + ret0, _ := ret[0].(volume.VolumesListOKBody) ret1, _ := ret[1].(error) return ret0, ret1 } diff --git a/plugin/flavor/swarm/flavor.go b/plugin/flavor/swarm/flavor.go index 6a6290602..78984d0b6 100644 --- a/plugin/flavor/swarm/flavor.go +++ b/plugin/flavor/swarm/flavor.go @@ -162,7 +162,7 @@ func (s swarmFlavor) Healthy(flavorProperties json.RawMessage, inst instance.Des filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("%s=%s", associationTag, associationID)) - nodes, err := s.client.NodeList(context.Background(), docker_types.NodeListOptions{Filter: filter}) + nodes, err := s.client.NodeList(context.Background(), docker_types.NodeListOptions{Filters: filter}) if err != nil { return flavor.Unknown, err } @@ -201,7 +201,7 @@ 
func (s swarmFlavor) Drain(flavorProperties json.RawMessage, inst instance.Descr filter := filters.NewArgs() filter.Add("label", fmt.Sprintf("%s=%s", associationTag, associationID)) - nodes, err := s.client.NodeList(context.Background(), docker_types.NodeListOptions{Filter: filter}) + nodes, err := s.client.NodeList(context.Background(), docker_types.NodeListOptions{Filters: filter}) if err != nil { return err } diff --git a/plugin/flavor/swarm/flavor_test.go b/plugin/flavor/swarm/flavor_test.go index 03590e66d..418a3c4d1 100644 --- a/plugin/flavor/swarm/flavor_test.go +++ b/plugin/flavor/swarm/flavor_test.go @@ -73,10 +73,10 @@ func TestWorker(t *testing.T) { } client.EXPECT().SwarmInspect(gomock.Any()).Return(swarmInfo, nil) - client.EXPECT().Info(gomock.Any()).Return(docker_types.Info{Swarm: swarm.Info{NodeID: "my-node-id"}}, nil) + client.EXPECT().Info(gomock.Any()).Return(infoResponse, nil) nodeInfo := swarm.Node{ManagerStatus: &swarm.ManagerStatus{Addr: "1.2.3.4"}} - client.EXPECT().NodeInspectWithRaw(gomock.Any(), "my-node-id").Return(nodeInfo, nil, nil) + client.EXPECT().NodeInspectWithRaw(gomock.Any(), nodeID).Return(nodeInfo, nil, nil) details, err := flavorImpl.Prepare( json.RawMessage(`{"Type": "worker"}`), @@ -103,7 +103,7 @@ func TestWorker(t *testing.T) { filter, err := filters.FromParam(fmt.Sprintf(`{"label": {"%s=%s": true}}`, associationTag, associationID)) require.NoError(t, err) - client.EXPECT().NodeList(gomock.Any(), docker_types.NodeListOptions{Filter: filter}).Return( + client.EXPECT().NodeList(gomock.Any(), docker_types.NodeListOptions{Filters: filter}).Return( []swarm.Node{ {}, }, nil) @@ -114,6 +114,10 @@ func TestWorker(t *testing.T) { require.Equal(t, flavor.Healthy, health) } +const nodeID = "my-node-id" + +var infoResponse = docker_types.Info{InfoBase: &docker_types.InfoBase{Swarm: swarm.Info{NodeID: nodeID}}} + func TestManager(t *testing.T) { ctrl := gomock.NewController(t) defer ctrl.Finish() @@ -131,10 +135,10 @@ func TestManager(t *testing.T) { } client.EXPECT().SwarmInspect(gomock.Any()).Return(swarmInfo, nil) - client.EXPECT().Info(gomock.Any()).Return(docker_types.Info{Swarm: swarm.Info{NodeID: "my-node-id"}}, nil) + client.EXPECT().Info(gomock.Any()).Return(infoResponse, nil) nodeInfo := swarm.Node{ManagerStatus: &swarm.ManagerStatus{Addr: "1.2.3.4"}} - client.EXPECT().NodeInspectWithRaw(gomock.Any(), "my-node-id").Return(nodeInfo, nil, nil) + client.EXPECT().NodeInspectWithRaw(gomock.Any(), nodeID).Return(nodeInfo, nil, nil) id := instance.LogicalID("127.0.0.1") details, err := flavorImpl.Prepare( @@ -162,7 +166,7 @@ func TestManager(t *testing.T) { filter, err := filters.FromParam(fmt.Sprintf(`{"label": {"%s=%s": true}}`, associationTag, associationID)) require.NoError(t, err) - client.EXPECT().NodeList(gomock.Any(), docker_types.NodeListOptions{Filter: filter}).Return( + client.EXPECT().NodeList(gomock.Any(), docker_types.NodeListOptions{Filters: filter}).Return( []swarm.Node{ {}, }, nil) diff --git a/rpc/client/client.go b/rpc/client/client.go new file mode 100644 index 000000000..fdc5c645a --- /dev/null +++ b/rpc/client/client.go @@ -0,0 +1,39 @@ +package client + +import ( + "bytes" + "github.com/gorilla/rpc/v2/json" + "net" + "net/http" +) + +// Client is an HTTP client for sending JSON-RPC requests. +type Client struct { + http http.Client +} + +// New creates a new Client that communicates with a unix socket.
+func New(socketPath string) Client { + dialUnix := func(proto, addr string) (conn net.Conn, err error) { + return net.Dial("unix", socketPath) + } + + return Client{http: http.Client{Transport: &http.Transport{Dial: dialUnix}}} +} + +// Call sends an RPC with a method and argument. The result must be a pointer to the response object. +func (c Client) Call(method string, arg interface{}, result interface{}) error { + message, err := json.EncodeClientRequest(method, arg) + if err != nil { + return err + } + + resp, err := c.http.Post("http://d/rpc", "application/json", bytes.NewReader(message)) + if err != nil { + return err + } + + defer resp.Body.Close() + + return json.DecodeClientResponse(resp.Body, result) +} diff --git a/rpc/flavor/client.go b/rpc/flavor/client.go index 7bc698abc..cdc636187 100644 --- a/rpc/flavor/client.go +++ b/rpc/flavor/client.go @@ -2,42 +2,35 @@ package flavor import ( "encoding/json" - "net" - "net/rpc" - "net/rpc/jsonrpc" - "github.com/docker/infrakit/plugin/group/types" + rpc_client "github.com/docker/infrakit/rpc/client" "github.com/docker/infrakit/spi/flavor" "github.com/docker/infrakit/spi/instance" ) // NewClient returns a plugin interface implementation connected to a remote plugin -func NewClient(protocol, addr string) (flavor.Plugin, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &client{rpc: jsonrpc.NewClient(conn)}, nil +func NewClient(socketPath string) flavor.Plugin { + return &client{client: rpc_client.New(socketPath)} } type client struct { - rpc *rpc.Client + client rpc_client.Client } // Validate checks whether the helper can support a configuration. -func (c *client) Validate(flavorProperties json.RawMessage, allocation types.AllocationMethod) error { - req := &ValidateRequest{Properties: flavorProperties, Allocation: allocation} - resp := &ValidateResponse{} - return c.rpc.Call("Flavor.Validate", req, resp) +func (c client) Validate(flavorProperties json.RawMessage, allocation types.AllocationMethod) error { + req := ValidateRequest{Properties: &flavorProperties, Allocation: allocation} + resp := ValidateResponse{} + return c.client.Call("Flavor.Validate", req, &resp) } // Prepare allows the Flavor to modify the provisioning instructions for an instance. For example, a // helper could be used to place additional tags on the machine, or generate a specialized Init command based on // the flavor configuration. -func (c *client) Prepare(flavorProperties json.RawMessage, spec instance.Spec, allocation types.AllocationMethod) (instance.Spec, error) { - req := &PrepareRequest{Properties: flavorProperties, Spec: spec, Allocation: allocation} - resp := &PrepareResponse{} - err := c.rpc.Call("Flavor.Prepare", req, resp) +func (c client) Prepare(flavorProperties json.RawMessage, spec instance.Spec, allocation types.AllocationMethod) (instance.Spec, error) { + req := PrepareRequest{Properties: &flavorProperties, Spec: spec, Allocation: allocation} + resp := PrepareResponse{} + err := c.client.Call("Flavor.Prepare", req, &resp) if err != nil { return spec, err } @@ -45,18 +38,18 @@ func (c *client) Prepare(flavorProperties json.RawMessage, spec instance.Spec, a } // Healthy determines the Health of this Flavor on an instance. 
-func (c *client) Healthy(flavorProperties json.RawMessage, inst instance.Description) (flavor.Health, error) { - req := &HealthyRequest{Properties: flavorProperties, Instance: inst} - resp := &HealthyResponse{} - err := c.rpc.Call("Flavor.Healthy", req, resp) +func (c client) Healthy(flavorProperties json.RawMessage, inst instance.Description) (flavor.Health, error) { + req := HealthyRequest{Properties: &flavorProperties, Instance: inst} + resp := HealthyResponse{} + err := c.client.Call("Flavor.Healthy", req, &resp) return resp.Health, err } // Drain allows the flavor to perform a best-effort cleanup operation before the instance is destroyed. -func (c *client) Drain(flavorProperties json.RawMessage, inst instance.Description) error { - req := &DrainRequest{Properties: flavorProperties, Instance: inst} - resp := &DrainResponse{} - err := c.rpc.Call("Flavor.Drain", req, resp) +func (c client) Drain(flavorProperties json.RawMessage, inst instance.Description) error { + req := DrainRequest{Properties: &flavorProperties, Instance: inst} + resp := DrainResponse{} + err := c.client.Call("Flavor.Drain", req, &resp) if err != nil { return err } diff --git a/rpc/flavor/rpc_test.go b/rpc/flavor/rpc_test.go index ed867ece6..7c20295e8 100644 --- a/rpc/flavor/rpc_test.go +++ b/rpc/flavor/rpc_test.go @@ -6,7 +6,7 @@ import ( "testing" "github.com/docker/infrakit/plugin/group/types" - "github.com/docker/infrakit/rpc" + rpc_server "github.com/docker/infrakit/rpc/server" "github.com/docker/infrakit/spi/flavor" "github.com/docker/infrakit/spi/instance" "github.com/stretchr/testify/require" @@ -25,12 +25,6 @@ func tempSocket() string { return path.Join(dir, "flavor-impl-test") } -func testClient(t *testing.T, socket string) flavor.Plugin { - cl, err := NewClient("unix", socket) - require.NoError(t, err) - return cl -} - type testPlugin struct { DoValidate func(flavorProperties json.RawMessage, allocation types.AllocationMethod) error DoPrepare func( @@ -67,7 +61,7 @@ func TestFlavorPluginValidate(t *testing.T) { inputFlavorPropertiesActual := make(chan json.RawMessage, 1) inputFlavorProperties := json.RawMessage([]byte(`{"flavor":"zookeeper","role":"leader"}`)) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoValidate: func(flavorProperties json.RawMessage, allocation types.AllocationMethod) error { inputFlavorPropertiesActual <- flavorProperties return nil @@ -75,9 +69,9 @@ func TestFlavorPluginValidate(t *testing.T) { })) require.NoError(t, err) - require.NoError(t, testClient(t, socketPath).Validate(inputFlavorProperties, allocation)) + require.NoError(t, NewClient(socketPath).Validate(inputFlavorProperties, allocation)) - close(stop) + server.Stop() require.Equal(t, inputFlavorProperties, <-inputFlavorPropertiesActual) } @@ -88,7 +82,7 @@ func TestFlavorPluginValidateError(t *testing.T) { inputFlavorPropertiesActual := make(chan json.RawMessage, 1) inputFlavorProperties := json.RawMessage([]byte(`{"flavor":"zookeeper","role":"leader"}`)) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoValidate: func(flavorProperties json.RawMessage, allocation types.AllocationMethod) error { inputFlavorPropertiesActual <- flavorProperties return errors.New("something-went-wrong") @@ -96,11 +90,11 @@ func TestFlavorPluginValidateError(t *testing.T) { })) 
require.NoError(t, err) - err = testClient(t, socketPath).Validate(inputFlavorProperties, allocation) + err = NewClient(socketPath).Validate(inputFlavorProperties, allocation) require.Error(t, err) require.Equal(t, "something-went-wrong", err.Error()) - close(stop) + server.Stop() require.Equal(t, inputFlavorProperties, <-inputFlavorPropertiesActual) } @@ -115,7 +109,7 @@ func TestFlavorPluginPrepare(t *testing.T) { Tags: map[string]string{"foo": "bar"}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoPrepare: func( flavorProperties json.RawMessage, instanceSpec instance.Spec, @@ -129,14 +123,14 @@ func TestFlavorPluginPrepare(t *testing.T) { })) require.NoError(t, err) - spec, err := testClient(t, socketPath).Prepare( + spec, err := NewClient(socketPath).Prepare( inputFlavorProperties, inputInstanceSpec, allocation) require.NoError(t, err) require.Equal(t, inputInstanceSpec, spec) - close(stop) + server.Stop() require.Equal(t, inputFlavorProperties, <-inputFlavorPropertiesActual) require.Equal(t, inputInstanceSpec, <-inputInstanceSpecActual) @@ -153,7 +147,7 @@ func TestFlavorPluginPrepareError(t *testing.T) { Tags: map[string]string{"foo": "bar"}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoPrepare: func( flavorProperties json.RawMessage, instanceSpec instance.Spec, @@ -167,14 +161,14 @@ func TestFlavorPluginPrepareError(t *testing.T) { })) require.NoError(t, err) - _, err = testClient(t, socketPath).Prepare( + _, err = NewClient(socketPath).Prepare( inputFlavorProperties, inputInstanceSpec, allocation) require.Error(t, err) require.Equal(t, "bad-thing-happened", err.Error()) - close(stop) + server.Stop() require.Equal(t, inputFlavorProperties, <-inputFlavorPropertiesActual) require.Equal(t, inputInstanceSpec, <-inputInstanceSpecActual) @@ -190,7 +184,7 @@ func TestFlavorPluginHealthy(t *testing.T) { ID: instance.ID("foo"), Tags: map[string]string{"foo": "bar"}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoHealthy: func(properties json.RawMessage, inst instance.Description) (flavor.Health, error) { inputPropertiesActual <- properties inputInstanceActual <- inst @@ -199,13 +193,13 @@ func TestFlavorPluginHealthy(t *testing.T) { })) require.NoError(t, err) - health, err := testClient(t, socketPath).Healthy(inputProperties, inputInstance) + health, err := NewClient(socketPath).Healthy(inputProperties, inputInstance) require.NoError(t, err) require.Equal(t, flavor.Healthy, health) require.Equal(t, inputProperties, <-inputPropertiesActual) require.Equal(t, inputInstance, <-inputInstanceActual) - close(stop) + server.Stop() } func TestFlavorPluginHealthyError(t *testing.T) { @@ -218,7 +212,7 @@ func TestFlavorPluginHealthyError(t *testing.T) { ID: instance.ID("foo"), Tags: map[string]string{"foo": "bar"}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoHealthy: func(flavorProperties json.RawMessage, inst instance.Description) (flavor.Health, error) { inputPropertiesActual <- flavorProperties inputInstanceActual <- inst @@ -227,13 +221,13 @@ func TestFlavorPluginHealthyError(t *testing.T) { })) require.NoError(t, err) - _, 
err = testClient(t, socketPath).Healthy(inputProperties, inputInstance) + _, err = NewClient(socketPath).Healthy(inputProperties, inputInstance) require.Error(t, err) require.Equal(t, "oh-noes", err.Error()) require.Equal(t, inputProperties, <-inputPropertiesActual) require.Equal(t, inputInstance, <-inputInstanceActual) - close(stop) + server.Stop() } func TestFlavorPluginDrain(t *testing.T) { @@ -246,7 +240,7 @@ func TestFlavorPluginDrain(t *testing.T) { ID: instance.ID("foo"), Tags: map[string]string{"foo": "bar"}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDrain: func(properties json.RawMessage, inst instance.Description) error { inputPropertiesActual <- properties inputInstanceActual <- inst @@ -255,11 +249,11 @@ func TestFlavorPluginDrain(t *testing.T) { })) require.NoError(t, err) - require.NoError(t, testClient(t, socketPath).Drain(inputProperties, inputInstance)) + require.NoError(t, NewClient(socketPath).Drain(inputProperties, inputInstance)) require.Equal(t, inputProperties, <-inputPropertiesActual) require.Equal(t, inputInstance, <-inputInstanceActual) - close(stop) + server.Stop() } func TestFlavorPluginDrainError(t *testing.T) { @@ -272,7 +266,7 @@ func TestFlavorPluginDrainError(t *testing.T) { ID: instance.ID("foo"), Tags: map[string]string{"foo": "bar"}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDrain: func(flavorProperties json.RawMessage, inst instance.Description) error { inputPropertiesActual <- flavorProperties inputInstanceActual <- inst @@ -281,11 +275,11 @@ func TestFlavorPluginDrainError(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).Drain(inputProperties, inputInstance) + err = NewClient(socketPath).Drain(inputProperties, inputInstance) require.Error(t, err) require.Equal(t, "oh-noes", err.Error()) require.Equal(t, inputProperties, <-inputPropertiesActual) require.Equal(t, inputInstance, <-inputInstanceActual) - close(stop) + server.Stop() } diff --git a/rpc/flavor/service.go b/rpc/flavor/service.go index 0d63eeb7a..6483b8a7a 100644 --- a/rpc/flavor/service.go +++ b/rpc/flavor/service.go @@ -2,10 +2,11 @@ package flavor import ( "github.com/docker/infrakit/spi/flavor" + "net/http" ) // PluginServer returns a RPCService that conforms to the net/rpc rpc call convention. -func PluginServer(p flavor.Plugin) RPCService { +func PluginServer(p flavor.Plugin) *Flavor { return &Flavor{plugin: p} } @@ -15,8 +16,8 @@ type Flavor struct { } // Validate checks whether the helper can support a configuration. -func (p *Flavor) Validate(req *ValidateRequest, resp *ValidateResponse) error { - err := p.plugin.Validate(req.Properties, req.Allocation) +func (p *Flavor) Validate(_ *http.Request, req *ValidateRequest, resp *ValidateResponse) error { + err := p.plugin.Validate(*req.Properties, req.Allocation) if err != nil { return err } @@ -27,8 +28,8 @@ func (p *Flavor) Validate(req *ValidateRequest, resp *ValidateResponse) error { // Prepare allows the Flavor to modify the provisioning instructions for an instance. For example, a // helper could be used to place additional tags on the machine, or generate a specialized Init command based on // the flavor configuration. 
-func (p *Flavor) Prepare(req *PrepareRequest, resp *PrepareResponse) error { - spec, err := p.plugin.Prepare(req.Properties, req.Spec, req.Allocation) +func (p *Flavor) Prepare(_ *http.Request, req *PrepareRequest, resp *PrepareResponse) error { + spec, err := p.plugin.Prepare(*req.Properties, req.Spec, req.Allocation) if err != nil { return err } @@ -37,8 +38,8 @@ func (p *Flavor) Prepare(req *PrepareRequest, resp *PrepareResponse) error { } // Healthy determines whether an instance is healthy. -func (p *Flavor) Healthy(req *HealthyRequest, resp *HealthyResponse) error { - health, err := p.plugin.Healthy(req.Properties, req.Instance) +func (p *Flavor) Healthy(_ *http.Request, req *HealthyRequest, resp *HealthyResponse) error { + health, err := p.plugin.Healthy(*req.Properties, req.Instance) if err != nil { return err } @@ -47,8 +48,8 @@ func (p *Flavor) Healthy(req *HealthyRequest, resp *HealthyResponse) error { } // Drain drains the instance. It's the inverse of prepare before provision and happens before destroy. -func (p *Flavor) Drain(req *DrainRequest, resp *DrainResponse) error { - err := p.plugin.Drain(req.Properties, req.Instance) +func (p *Flavor) Drain(_ *http.Request, req *DrainRequest, resp *DrainResponse) error { + err := p.plugin.Drain(*req.Properties, req.Instance) if err != nil { return err } diff --git a/rpc/flavor/types.go b/rpc/flavor/types.go index 45548780f..dcf88b1a0 100644 --- a/rpc/flavor/types.go +++ b/rpc/flavor/types.go @@ -10,7 +10,7 @@ import ( // ValidateRequest is the rpc wrapper for request parameters to Validate type ValidateRequest struct { - Properties json.RawMessage + Properties *json.RawMessage Allocation types.AllocationMethod } @@ -21,7 +21,7 @@ type ValidateResponse struct { // PrepareRequest is the rpc wrapper of the params to Prepare type PrepareRequest struct { - Properties json.RawMessage + Properties *json.RawMessage Spec instance.Spec Allocation types.AllocationMethod } @@ -33,7 +33,7 @@ type PrepareResponse struct { // HealthyRequest is the rpc wrapper of the params to Healthy type HealthyRequest struct { - Properties json.RawMessage + Properties *json.RawMessage Instance instance.Description } @@ -44,7 +44,7 @@ type HealthyResponse struct { // DrainRequest is the rpc wrapper of the params to Drain type DrainRequest struct { - Properties json.RawMessage + Properties *json.RawMessage Instance instance.Description } @@ -52,20 +52,3 @@ type DrainRequest struct { type DrainResponse struct { OK bool } - -// RPCService is the interface for exposing the plugin methods to rpc -type RPCService interface { - - // Validate is the rpc method for flavor Validate - Validate(req *ValidateRequest, resp *ValidateResponse) error - - // Prepare is the rpc method for flavor Prepare - Prepare(req *PrepareRequest, resp *PrepareResponse) error - - // Healthy is the rpc method for flavor Healthy - Healthy(req *HealthyRequest, resp *HealthyResponse) error - - // Drain is the rpc method for flavor Drain - // It's the inverse of prepare before provision and happens before destroy. 
- Drain(req *DrainRequest, resp *DrainResponse) error -} diff --git a/rpc/group/client.go b/rpc/group/client.go index ce1d61ea8..fa24d7b4f 100644 --- a/rpc/group/client.go +++ b/rpc/group/client.go @@ -1,40 +1,33 @@ package group import ( - "net" - "net/rpc" - "net/rpc/jsonrpc" - + rpc_client "github.com/docker/infrakit/rpc/client" "github.com/docker/infrakit/spi/group" ) // NewClient returns a plugin interface implementation connected to a remote plugin -func NewClient(protocol, addr string) (group.Plugin, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &client{rpc: jsonrpc.NewClient(conn)}, nil +func NewClient(socketPath string) group.Plugin { + return &client{client: rpc_client.New(socketPath)} } type client struct { - rpc *rpc.Client + client rpc_client.Client } -func (c *client) CommitGroup(grp group.Spec, pretend bool) (string, error) { - req := &CommitGroupRequest{Spec: grp, Pretend: pretend} - resp := &CommitGroupResponse{} - err := c.rpc.Call("Group.CommitGroup", req, resp) +func (c client) CommitGroup(grp group.Spec, pretend bool) (string, error) { + req := CommitGroupRequest{Spec: grp, Pretend: pretend} + resp := CommitGroupResponse{} + err := c.client.Call("Group.CommitGroup", req, &resp) if err != nil { return resp.Details, err } return resp.Details, nil } -func (c *client) FreeGroup(id group.ID) error { - req := &FreeGroupRequest{ID: id} - resp := &FreeGroupResponse{} - err := c.rpc.Call("Group.FreeGroup", req, resp) +func (c client) FreeGroup(id group.ID) error { + req := FreeGroupRequest{ID: id} + resp := FreeGroupResponse{} + err := c.client.Call("Group.FreeGroup", req, &resp) if err != nil { return err } @@ -42,17 +35,17 @@ func (c *client) FreeGroup(id group.ID) error { return nil } -func (c *client) DescribeGroup(id group.ID) (group.Description, error) { - req := &DescribeGroupRequest{ID: id} - resp := &DescribeGroupResponse{} - err := c.rpc.Call("Group.DescribeGroup", req, resp) +func (c client) DescribeGroup(id group.ID) (group.Description, error) { + req := DescribeGroupRequest{ID: id} + resp := DescribeGroupResponse{} + err := c.client.Call("Group.DescribeGroup", req, &resp) return resp.Description, err } -func (c *client) DestroyGroup(id group.ID) error { - req := &DestroyGroupRequest{ID: id} - resp := &DestroyGroupResponse{} - err := c.rpc.Call("Group.DestroyGroup", req, resp) +func (c client) DestroyGroup(id group.ID) error { + req := DestroyGroupRequest{ID: id} + resp := DestroyGroupResponse{} + err := c.client.Call("Group.DestroyGroup", req, &resp) if err != nil { return err } @@ -60,9 +53,9 @@ func (c *client) DestroyGroup(id group.ID) error { return nil } -func (c *client) InspectGroups() ([]group.Spec, error) { - req := &InspectGroupsRequest{} - resp := &InspectGroupsResponse{} - err := c.rpc.Call("Group.InspectGroups", req, resp) +func (c client) InspectGroups() ([]group.Spec, error) { + req := InspectGroupsRequest{} + resp := InspectGroupsResponse{} + err := c.client.Call("Group.InspectGroups", req, &resp) return resp.Groups, err } diff --git a/rpc/group/rpc_test.go b/rpc/group/rpc_test.go index e2bf4011b..14bdedf5c 100644 --- a/rpc/group/rpc_test.go +++ b/rpc/group/rpc_test.go @@ -5,7 +5,7 @@ import ( "errors" "testing" - "github.com/docker/infrakit/rpc" + rpc_server "github.com/docker/infrakit/rpc/server" "github.com/docker/infrakit/spi/group" "github.com/docker/infrakit/spi/instance" "github.com/stretchr/testify/require" @@ -21,12 +21,6 @@ type testPlugin struct { 
DoInspectGroups func() ([]group.Spec, error) } -func testClient(t *testing.T, socket string) group.Plugin { - cl, err := NewClient("unix", socket) - require.NoError(t, err) - return cl -} - func (t *testPlugin) CommitGroup(grp group.Spec, pretend bool) (string, error) { return t.DoCommitGroup(grp, pretend) } @@ -62,20 +56,19 @@ func TestGroupPluginCommitGroup(t *testing.T) { Properties: &raw, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoCommitGroup: func(req group.Spec, pretend bool) (string, error) { groupSpecActual <- req return "commit details", nil }, })) - require.NoError(t, err) // Make call - details, err := testClient(t, socketPath).CommitGroup(groupSpec, false) + details, err := NewClient(socketPath).CommitGroup(groupSpec, false) require.NoError(t, err) require.Equal(t, "commit details", details) - close(stop) + server.Stop() require.Equal(t, groupSpec, <-groupSpecActual) } @@ -90,7 +83,7 @@ func TestGroupPluginCommitGroupError(t *testing.T) { Properties: &raw, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoCommitGroup: func(req group.Spec, pretend bool) (string, error) { groupSpecActual <- req return "", errors.New("error") @@ -98,11 +91,11 @@ func TestGroupPluginCommitGroupError(t *testing.T) { })) require.NoError(t, err) - _, err = testClient(t, socketPath).CommitGroup(groupSpec, false) + _, err = NewClient(socketPath).CommitGroup(groupSpec, false) require.Error(t, err) require.Equal(t, "error", err.Error()) - close(stop) + server.Stop() require.Equal(t, groupSpec, <-groupSpecActual) } @@ -112,7 +105,7 @@ func TestGroupPluginFreeGroup(t *testing.T) { id := group.ID("group") idActual := make(chan group.ID, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoFreeGroup: func(req group.ID) error { idActual <- req return nil @@ -120,10 +113,10 @@ func TestGroupPluginFreeGroup(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).FreeGroup(id) + err = NewClient(socketPath).FreeGroup(id) require.NoError(t, err) - close(stop) + server.Stop() require.Equal(t, id, <-idActual) } @@ -132,7 +125,7 @@ func TestGroupPluginFreeGroupError(t *testing.T) { id := group.ID("group") idActual := make(chan group.ID, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoFreeGroup: func(req group.ID) error { idActual <- req return errors.New("no") @@ -140,11 +133,11 @@ func TestGroupPluginFreeGroupError(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).FreeGroup(id) + err = NewClient(socketPath).FreeGroup(id) require.Error(t, err) require.Equal(t, "no", err.Error()) - close(stop) + server.Stop() require.Equal(t, id, <-idActual) } @@ -153,7 +146,7 @@ func TestGroupPluginDestroyGroup(t *testing.T) { id := group.ID("group") idActual := make(chan group.ID, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDestroyGroup: func(req group.ID) error { idActual <- req return nil @@ -161,10 +154,10 @@ func TestGroupPluginDestroyGroup(t *testing.T) { })) require.NoError(t, err) - err = 
testClient(t, socketPath).DestroyGroup(id) + err = NewClient(socketPath).DestroyGroup(id) require.NoError(t, err) - close(stop) + server.Stop() require.Equal(t, id, <-idActual) } @@ -173,7 +166,7 @@ func TestGroupPluginDestroyGroupError(t *testing.T) { id := group.ID("group") idActual := make(chan group.ID, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDestroyGroup: func(req group.ID) error { idActual <- req return errors.New("no") @@ -181,11 +174,11 @@ func TestGroupPluginDestroyGroupError(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).DestroyGroup(id) + err = NewClient(socketPath).DestroyGroup(id) require.Error(t, err) require.Equal(t, "no", err.Error()) - close(stop) + server.Stop() require.Equal(t, id, <-idActual) } @@ -201,19 +194,18 @@ func TestGroupPluginInspectGroup(t *testing.T) { }, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDescribeGroup: func(req group.ID) (group.Description, error) { idActual <- req return desc, nil }, })) - require.NoError(t, err) - res, err := testClient(t, socketPath).DescribeGroup(id) + res, err := NewClient(socketPath).DescribeGroup(id) require.NoError(t, err) require.Equal(t, desc, res) - close(stop) + server.Stop() require.Equal(t, id, <-idActual) } @@ -228,7 +220,7 @@ func TestGroupPluginInspectGroupError(t *testing.T) { }, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDescribeGroup: func(req group.ID) (group.Description, error) { idActual <- req return desc, errors.New("no") @@ -236,10 +228,10 @@ func TestGroupPluginInspectGroupError(t *testing.T) { })) require.NoError(t, err) - _, err = testClient(t, socketPath).DescribeGroup(id) + _, err = NewClient(socketPath).DescribeGroup(id) require.Error(t, err) require.Equal(t, "no", err.Error()) - close(stop) + server.Stop() require.Equal(t, id, <-idActual) } diff --git a/rpc/group/service.go b/rpc/group/service.go index 6572dc784..2a6446eae 100644 --- a/rpc/group/service.go +++ b/rpc/group/service.go @@ -2,10 +2,11 @@ package group import ( "github.com/docker/infrakit/spi/group" + "net/http" ) // PluginServer returns a RPCService that conforms to the net/rpc rpc call convention. 
-func PluginServer(p group.Plugin) RPCService { +func PluginServer(p group.Plugin) *Group { return &Group{plugin: p} } @@ -15,7 +16,7 @@ type Group struct { } // CommitGroup is the rpc method to commit a group -func (p *Group) CommitGroup(req *CommitGroupRequest, resp *CommitGroupResponse) error { +func (p *Group) CommitGroup(_ *http.Request, req *CommitGroupRequest, resp *CommitGroupResponse) error { details, err := p.plugin.CommitGroup(req.Spec, req.Pretend) if err != nil { return err @@ -25,7 +26,7 @@ func (p *Group) CommitGroup(req *CommitGroupRequest, resp *CommitGroupResponse) } // FreeGroup is the rpc method to free a group -func (p *Group) FreeGroup(req *FreeGroupRequest, resp *FreeGroupResponse) error { +func (p *Group) FreeGroup(_ *http.Request, req *FreeGroupRequest, resp *FreeGroupResponse) error { err := p.plugin.FreeGroup(req.ID) if err != nil { return err @@ -35,7 +36,7 @@ func (p *Group) FreeGroup(req *FreeGroupRequest, resp *FreeGroupResponse) error } // DescribeGroup is the rpc method to describe a group -func (p *Group) DescribeGroup(req *DescribeGroupRequest, resp *DescribeGroupResponse) error { +func (p *Group) DescribeGroup(_ *http.Request, req *DescribeGroupRequest, resp *DescribeGroupResponse) error { desc, err := p.plugin.DescribeGroup(req.ID) if err != nil { return err @@ -45,7 +46,7 @@ func (p *Group) DescribeGroup(req *DescribeGroupRequest, resp *DescribeGroupResp } // DestroyGroup is the rpc method to destroy a group -func (p *Group) DestroyGroup(req *DestroyGroupRequest, resp *DestroyGroupResponse) error { +func (p *Group) DestroyGroup(_ *http.Request, req *DestroyGroupRequest, resp *DestroyGroupResponse) error { err := p.plugin.DestroyGroup(req.ID) if err != nil { return err @@ -55,7 +56,7 @@ func (p *Group) DestroyGroup(req *DestroyGroupRequest, resp *DestroyGroupRespons } // InspectGroups is the rpc method to inspect groups -func (p *Group) InspectGroups(req *InspectGroupsRequest, resp *InspectGroupsResponse) error { +func (p *Group) InspectGroups(_ *http.Request, req *InspectGroupsRequest, resp *InspectGroupsResponse) error { groups, err := p.plugin.InspectGroups() if err != nil { return err diff --git a/rpc/group/types.go b/rpc/group/types.go index 5ce91978b..0bc72c79c 100644 --- a/rpc/group/types.go +++ b/rpc/group/types.go @@ -54,13 +54,3 @@ type InspectGroupsRequest struct { type InspectGroupsResponse struct { Groups []group.Spec } - -// RPCService is the interface for exposing the group plugin as a RPC service. 
It conforms to the call conventions -// defined in net/rpc -type RPCService interface { - CommitGroup(req *CommitGroupRequest, resp *CommitGroupResponse) error - FreeGroup(req *FreeGroupRequest, resp *FreeGroupResponse) error - DescribeGroup(req *DescribeGroupRequest, resp *DescribeGroupResponse) error - DestroyGroup(req *DestroyGroupRequest, resp *DestroyGroupResponse) error - InspectGroups(req *InspectGroupsRequest, resp *InspectGroupsResponse) error -} diff --git a/rpc/instance/client.go b/rpc/instance/client.go index a890bd398..a4f173a31 100644 --- a/rpc/instance/client.go +++ b/rpc/instance/client.go @@ -2,56 +2,53 @@ package instance import ( "encoding/json" - "net" - "net/rpc" - "net/rpc/jsonrpc" - + rpc_client "github.com/docker/infrakit/rpc/client" "github.com/docker/infrakit/spi/instance" ) -// NewClient returns a plugin interface implementation connected to a remote plugin -func NewClient(protocol, addr string) (instance.Plugin, error) { - conn, err := net.Dial(protocol, addr) - if err != nil { - return nil, err - } - return &client{rpc: jsonrpc.NewClient(conn)}, nil +// NewClient returns a plugin interface implementation connected to a plugin +func NewClient(socketPath string) instance.Plugin { + return &client{client: rpc_client.New(socketPath)} } type client struct { - rpc *rpc.Client + client rpc_client.Client } // Validate performs local validation on a provision request. -func (c *client) Validate(properties json.RawMessage) error { - req := &ValidateRequest{Properties: properties} - resp := &ValidateResponse{} - return c.rpc.Call("Instance.Validate", req, resp) +func (c client) Validate(properties json.RawMessage) error { + req := ValidateRequest{Properties: &properties} + resp := ValidateResponse{} + + return c.client.Call("Instance.Validate", req, &resp) } // Provision creates a new instance based on the spec. -func (c *client) Provision(spec instance.Spec) (*instance.ID, error) { - req := &ProvisionRequest{Spec: spec} - resp := &ProvisionResponse{} - err := c.rpc.Call("Instance.Provision", req, resp) - if err != nil { +func (c client) Provision(spec instance.Spec) (*instance.ID, error) { + req := ProvisionRequest{Spec: spec} + resp := ProvisionResponse{} + + if err := c.client.Call("Instance.Provision", req, &resp); err != nil { return nil, err } + return resp.ID, nil } // Destroy terminates an existing instance. -func (c *client) Destroy(instance instance.ID) error { - req := &DestroyRequest{Instance: instance} - resp := &DestroyResponse{} - return c.rpc.Call("Instance.Destroy", req, resp) +func (c client) Destroy(instance instance.ID) error { + req := DestroyRequest{Instance: instance} + resp := DestroyResponse{} + + return c.client.Call("Instance.Destroy", req, &resp) } // DescribeInstances returns descriptions of all instances matching all of the provided tags. 
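These clients all delegate to the new rpc/client package, which is not part of this hunk. The sketch below is only a guess at its likely shape (an http.Client whose transport dials the plugin's unix socket, plus the EncodeClientRequest and DecodeClientResponse helpers from the vendored gorilla json codec); it is meant to illustrate the transport, not to reproduce the package's actual source:

package client

import (
	"bytes"
	"net"
	"net/http"

	"github.com/gorilla/rpc/v2/json"
)

// Client calls a named JSON-RPC method with request and response wrapper structs.
type Client interface {
	Call(method string, arg interface{}, result interface{}) error
}

// New returns a Client that speaks JSON-RPC over HTTP on the given unix socket.
func New(socketPath string) Client {
	dial := func(network, addr string) (net.Conn, error) {
		// Ignore the requested address and always dial the plugin socket.
		return net.Dial("unix", socketPath)
	}
	return &client{http: http.Client{Transport: &http.Transport{Dial: dial}}}
}

type client struct {
	http http.Client
}

func (c *client) Call(method string, arg interface{}, result interface{}) error {
	body, err := json.EncodeClientRequest(method, arg)
	if err != nil {
		return err
	}
	// The host and path are placeholders; the custom Dial above routes everything to the socket.
	resp, err := c.http.Post("http://plugin/rpc", "application/json", bytes.NewReader(body))
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	// DecodeClientResponse surfaces the server-side error message if the envelope carries one.
	return json.DecodeClientResponse(resp.Body, result)
}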
-func (c *client) DescribeInstances(tags map[string]string) ([]instance.Description, error) { - req := &DescribeInstancesRequest{Tags: tags} - resp := &DescribeInstancesResponse{} - err := c.rpc.Call("Instance.DescribeInstances", req, resp) +func (c client) DescribeInstances(tags map[string]string) ([]instance.Description, error) { + req := DescribeInstancesRequest{Tags: tags} + resp := DescribeInstancesResponse{} + + err := c.client.Call("Instance.DescribeInstances", req, &resp) if err != nil { return nil, err } diff --git a/rpc/instance/rpc_test.go b/rpc/instance/rpc_test.go index d25dd6c76..7674a477e 100644 --- a/rpc/instance/rpc_test.go +++ b/rpc/instance/rpc_test.go @@ -5,7 +5,7 @@ import ( "errors" "testing" - "github.com/docker/infrakit/rpc" + rpc_server "github.com/docker/infrakit/rpc/server" "github.com/docker/infrakit/spi/instance" "github.com/stretchr/testify/require" "io/ioutil" @@ -48,12 +48,6 @@ func tempSocket() string { return path.Join(dir, "instance-impl-test") } -func testClient(t *testing.T, socket string) instance.Plugin { - cl, err := NewClient("unix", socket) - require.NoError(t, err) - return cl -} - func TestInstancePluginValidate(t *testing.T) { socketPath := tempSocket() @@ -62,7 +56,7 @@ func TestInstancePluginValidate(t *testing.T) { rawActual := make(chan json.RawMessage, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoValidate: func(req json.RawMessage) error { rawActual <- req @@ -72,10 +66,10 @@ func TestInstancePluginValidate(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).Validate(raw) + err = NewClient(socketPath).Validate(raw) require.NoError(t, err) - close(stop) + server.Stop() require.Equal(t, raw, <-rawActual) } @@ -86,7 +80,7 @@ func TestInstancePluginValidateError(t *testing.T) { raw := json.RawMessage([]byte(`{"name":"instance","type":"xlarge"}`)) rawActual := make(chan json.RawMessage, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoValidate: func(req json.RawMessage) error { rawActual <- req @@ -96,11 +90,11 @@ func TestInstancePluginValidateError(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).Validate(raw) + err = NewClient(socketPath).Validate(raw) require.Error(t, err) require.Equal(t, "whoops", err.Error()) - close(stop) + server.Stop() require.Equal(t, raw, <-rawActual) } @@ -112,7 +106,7 @@ func TestInstancePluginProvisionNil(t *testing.T) { spec := instance.Spec{ Properties: &raw, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoProvision: func(req instance.Spec) (*instance.ID, error) { specActual <- req return nil, nil @@ -121,11 +115,11 @@ func TestInstancePluginProvisionNil(t *testing.T) { require.NoError(t, err) var id *instance.ID - id, err = testClient(t, socketPath).Provision(spec) + id, err = NewClient(socketPath).Provision(spec) require.NoError(t, err) require.Nil(t, id) - close(stop) + server.Stop() require.Equal(t, spec, <-specActual) } @@ -138,7 +132,7 @@ func TestInstancePluginProvision(t *testing.T) { spec := instance.Spec{ Properties: &raw, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, 
PluginServer(&testPlugin{ DoProvision: func(req instance.Spec) (*instance.ID, error) { specActual <- req v := instance.ID("test") @@ -148,11 +142,11 @@ func TestInstancePluginProvision(t *testing.T) { require.NoError(t, err) var id *instance.ID - id, err = testClient(t, socketPath).Provision(spec) + id, err = NewClient(socketPath).Provision(spec) require.NoError(t, err) require.Equal(t, "test", string(*id)) - close(stop) + server.Stop() require.Equal(t, spec, <-specActual) } @@ -165,7 +159,7 @@ func TestInstancePluginProvisionError(t *testing.T) { spec := instance.Spec{ Properties: &raw, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoProvision: func(req instance.Spec) (*instance.ID, error) { specActual <- req return nil, errors.New("nope") @@ -173,11 +167,11 @@ func TestInstancePluginProvisionError(t *testing.T) { })) require.NoError(t, err) - _, err = testClient(t, socketPath).Provision(spec) + _, err = NewClient(socketPath).Provision(spec) require.Error(t, err) require.Equal(t, "nope", err.Error()) - close(stop) + server.Stop() require.Equal(t, spec, <-specActual) } @@ -188,7 +182,7 @@ func TestInstancePluginDestroy(t *testing.T) { inst := instance.ID("hello") instActual := make(chan instance.ID, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDestroy: func(req instance.ID) error { instActual <- req return nil @@ -196,10 +190,10 @@ func TestInstancePluginDestroy(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).Destroy(inst) + err = NewClient(socketPath).Destroy(inst) require.NoError(t, err) - close(stop) + server.Stop() require.Equal(t, inst, <-instActual) } @@ -210,7 +204,7 @@ func TestInstancePluginDestroyError(t *testing.T) { inst := instance.ID("hello") instActual := make(chan instance.ID, 1) - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDestroy: func(req instance.ID) error { instActual <- req return errors.New("can't do") @@ -218,11 +212,11 @@ func TestInstancePluginDestroyError(t *testing.T) { })) require.NoError(t, err) - err = testClient(t, socketPath).Destroy(inst) + err = NewClient(socketPath).Destroy(inst) require.Error(t, err) require.Equal(t, "can't do", err.Error()) - close(stop) + server.Stop() require.Equal(t, inst, <-instActual) } @@ -234,19 +228,18 @@ func TestInstancePluginDescribeInstancesNiInput(t *testing.T) { list := []instance.Description{ {ID: instance.ID("boo")}, {ID: instance.ID("boop")}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDescribeInstances: func(req map[string]string) ([]instance.Description, error) { tagsActual <- req return list, nil }, })) - require.NoError(t, err) - l, err := testClient(t, socketPath).DescribeInstances(tags) + l, err := NewClient(socketPath).DescribeInstances(tags) require.NoError(t, err) require.Equal(t, list, l) - close(stop) + server.Stop() require.Equal(t, tags, <-tagsActual) } @@ -260,7 +253,7 @@ func TestInstancePluginDescribeInstances(t *testing.T) { list := []instance.Description{ {ID: instance.ID("boo")}, {ID: instance.ID("boop")}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err 
:= rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDescribeInstances: func(req map[string]string) ([]instance.Description, error) { tagsActual <- req return list, nil @@ -268,11 +261,11 @@ func TestInstancePluginDescribeInstances(t *testing.T) { })) require.NoError(t, err) - l, err := testClient(t, socketPath).DescribeInstances(tags) + l, err := NewClient(socketPath).DescribeInstances(tags) require.NoError(t, err) require.Equal(t, list, l) - close(stop) + server.Stop() require.Equal(t, tags, <-tagsActual) } @@ -286,7 +279,7 @@ func TestInstancePluginDescribeInstancesError(t *testing.T) { list := []instance.Description{ {ID: instance.ID("boo")}, {ID: instance.ID("boop")}, } - stop, _, err := rpc.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ + server, err := rpc_server.StartPluginAtPath(socketPath, PluginServer(&testPlugin{ DoDescribeInstances: func(req map[string]string) ([]instance.Description, error) { tagsActual <- req return list, errors.New("bad") @@ -294,10 +287,10 @@ func TestInstancePluginDescribeInstancesError(t *testing.T) { })) require.NoError(t, err) - _, err = testClient(t, socketPath).DescribeInstances(tags) + _, err = NewClient(socketPath).DescribeInstances(tags) require.Error(t, err) require.Equal(t, "bad", err.Error()) - close(stop) + server.Stop() require.Equal(t, tags, <-tagsActual) } diff --git a/rpc/instance/service.go b/rpc/instance/service.go index 79ae7eda0..98154dc45 100644 --- a/rpc/instance/service.go +++ b/rpc/instance/service.go @@ -1,11 +1,13 @@ package instance import ( + "errors" "github.com/docker/infrakit/spi/instance" + "net/http" ) // PluginServer returns a RPCService that conforms to the net/rpc rpc call convention. -func PluginServer(p instance.Plugin) RPCService { +func PluginServer(p instance.Plugin) *Instance { return &Instance{plugin: p} } @@ -16,8 +18,12 @@ type Instance struct { } // Validate performs local validation on a provision request. -func (p *Instance) Validate(req *ValidateRequest, resp *ValidateResponse) error { - err := p.plugin.Validate(req.Properties) +func (p *Instance) Validate(_ *http.Request, req *ValidateRequest, resp *ValidateResponse) error { + if req.Properties == nil { + return errors.New("Request Properties must be set") + } + + err := p.plugin.Validate(*req.Properties) if err != nil { return err } @@ -26,7 +32,7 @@ func (p *Instance) Validate(req *ValidateRequest, resp *ValidateResponse) error } // Provision creates a new instance based on the spec. -func (p *Instance) Provision(req *ProvisionRequest, resp *ProvisionResponse) error { +func (p *Instance) Provision(_ *http.Request, req *ProvisionRequest, resp *ProvisionResponse) error { id, err := p.plugin.Provision(req.Spec) if err != nil { return err @@ -36,7 +42,7 @@ func (p *Instance) Provision(req *ProvisionRequest, resp *ProvisionResponse) err } // Destroy terminates an existing instance. -func (p *Instance) Destroy(req *DestroyRequest, resp *DestroyResponse) error { +func (p *Instance) Destroy(_ *http.Request, req *DestroyRequest, resp *DestroyResponse) error { err := p.plugin.Destroy(req.Instance) if err != nil { return err @@ -46,7 +52,7 @@ func (p *Instance) Destroy(req *DestroyRequest, resp *DestroyResponse) error { } // DescribeInstances returns descriptions of all instances matching all of the provided tags. 
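On the wire, each of these methods is one HTTP POST to the plugin's unix socket carrying a JSON-RPC envelope produced by the vendored gorilla json codec. The snippet below just prints the request body such a call would send for DescribeInstances; the exact field names depend on the struct tags in types.go and the id is random, so treat the output comment as approximate:

package main

import (
	"fmt"

	rpc_instance "github.com/docker/infrakit/rpc/instance"
	"github.com/gorilla/rpc/v2/json"
)

func main() {
	req := rpc_instance.DescribeInstancesRequest{Tags: map[string]string{"group": "workers"}}

	body, err := json.EncodeClientRequest("Instance.DescribeInstances", req)
	if err != nil {
		panic(err)
	}

	// Prints something like:
	// {"method":"Instance.DescribeInstances","params":[{"Tags":{"group":"workers"}}],"id":1234567890}
	fmt.Println(string(body))
}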
-func (p *Instance) DescribeInstances(req *DescribeInstancesRequest, resp *DescribeInstancesResponse) error { +func (p *Instance) DescribeInstances(_ *http.Request, req *DescribeInstancesRequest, resp *DescribeInstancesResponse) error { desc, err := p.plugin.DescribeInstances(req.Tags) if err != nil { return err diff --git a/rpc/instance/types.go b/rpc/instance/types.go index f46d6b9e1..78aa6c65a 100644 --- a/rpc/instance/types.go +++ b/rpc/instance/types.go @@ -8,7 +8,7 @@ import ( // ValidateRequest is the rpc wrapper for the Validate method args type ValidateRequest struct { - Properties json.RawMessage + Properties *json.RawMessage } // ValidateResponse is the rpc wrapper for the Validate response values @@ -45,19 +45,3 @@ type DescribeInstancesRequest struct { type DescribeInstancesResponse struct { Descriptions []instance.Description } - -// RPCService is the interface exposed via JSON RPC -type RPCService interface { - - // Validate performs validation on the input - Validate(req *ValidateRequest, resp *ValidateResponse) error - - // Provision creates a new instance based on the spec. - Provision(req *ProvisionRequest, resp *ProvisionResponse) error - - // Destroy terminates an existing instance. - Destroy(req *DestroyRequest, resp *DestroyResponse) error - - // DescribeInstances returns descriptions of all instances matching all of the provided tags. - DescribeInstances(req *DescribeInstancesRequest, resp *DescribeInstancesResponse) error -} diff --git a/rpc/server.go b/rpc/server.go deleted file mode 100644 index 7006ae23d..000000000 --- a/rpc/server.go +++ /dev/null @@ -1,161 +0,0 @@ -package rpc - -import ( - "net" - "net/http" - "net/rpc" - "net/rpc/jsonrpc" - "os" - "os/signal" - "syscall" - - log "github.com/Sirupsen/logrus" -) - -// StartPluginAtPath starts an HTTP server listening on a unix socket at the specified path. -// Caller can optionally provide functions to execute as part of shutdown sequence. -// Returns a channel to signal stop when closed, a channel to block on stopping, and an error if occurs. -func StartPluginAtPath(socketPath string, endpoint interface{}, shutdown ...func() error) (chan<- struct{}, <-chan error, error) { - log.Infoln("Listening at:", socketPath) - - server := rpc.NewServer() - err := server.Register(endpoint) - if err != nil { - return nil, nil, err - } - engineStop, engineStopped, err := runJSONRPCServer(socketPath, server) - if err != nil { - return nil, nil, err - } - - shutdownTasks := []func() error{} - for _, f := range shutdown { - shutdownTasks = append(shutdownTasks, f) - } - - shutdownTasks = append(shutdownTasks, func() error { - // close channels that others may block on for shutdown - close(engineStop) - err := <-engineStopped - return err - }) - - // Triggers to start shutdown sequence - fromKernel := make(chan os.Signal, 1) - - // kill -9 is SIGKILL and is uncatchable. 
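The move to *json.RawMessage above lets a request tell "no properties supplied" (a nil pointer, which the Validate handler now rejects) apart from an empty but present document. Roughly how that looks to a caller building the request wrapper directly; the output comments are approximate and assume default struct tags:

package main

import (
	"encoding/json"
	"fmt"

	rpc_instance "github.com/docker/infrakit/rpc/instance"
)

func main() {
	raw := json.RawMessage([]byte(`{"type":"xlarge"}`))

	with := rpc_instance.ValidateRequest{Properties: &raw}
	without := rpc_instance.ValidateRequest{} // Properties == nil; the server answers with an error

	a, _ := json.Marshal(with)
	b, _ := json.Marshal(without)
	fmt.Println(string(a)) // {"Properties":{"type":"xlarge"}}
	fmt.Println(string(b)) // {"Properties":null}
}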
- signal.Notify(fromKernel, syscall.SIGHUP) // 1 - signal.Notify(fromKernel, syscall.SIGINT) // 2 - signal.Notify(fromKernel, syscall.SIGQUIT) // 3 - signal.Notify(fromKernel, syscall.SIGABRT) // 6 - signal.Notify(fromKernel, syscall.SIGTERM) // 15 - - fromUser := make(chan struct{}) - stopped := make(chan error) - go func(tasks []func() error) { - defer close(stopped) - - select { - case <-fromKernel: - case <-fromUser: - } - for _, task := range tasks { - if err := task(); err != nil { - stopped <- err - return - } - } - return - }(shutdownTasks) - - return fromUser, stopped, nil -} - -// Run the JSON RPC server listening at the given path -func runJSONRPCServer(socketPath string, server *rpc.Server) (chan<- struct{}, <-chan error, error) { - listener, err := net.Listen("unix", socketPath) - if err != nil { - return nil, nil, err - } - - stop := make(chan struct{}) - stopped := make(chan error) - - userInitiated := new(bool) - go func() { - <-stop - *userInitiated = true - listener.Close() - }() - - go func() { - - for { - conn, err := listener.Accept() - if err != nil { - break - } - go server.ServeCodec(jsonrpc.NewServerCodec(conn)) - } - - defer close(stopped) - - switch { - case !*userInitiated && err != nil: - panic(err) - case *userInitiated: - stopped <- nil - default: - stopped <- err - } - }() - return stop, stopped, nil -} - -// Runs the http server. This server offers more control than the standard go's default http server. -// When the returned stop channel is closed, a clean shutdown and shutdown tasks are executed. -// The return value is a channel that can be used to block on. An error is received if server shuts -// down in error; or a nil is received on a clean signalled shutdown. -func runHTTP(socketPath string, server *http.Server) (chan<- struct{}, <-chan error, error) { - listener, err := net.Listen("unix", socketPath) - - if err != nil { - return nil, nil, err - } - - if _, err = os.Lstat(socketPath); err == nil { - // Update socket filename permission - if err := os.Chmod(server.Addr, 0700); err != nil { - return nil, nil, err - } - } else { - return nil, nil, err - } - - stop := make(chan struct{}) - stopped := make(chan error) - - userInitiated := new(bool) - go func() { - <-stop - *userInitiated = true - listener.Close() - }() - - go func() { - // Serve will block until an error (e.g. from shutdown, closed connection) occurs. - err := server.Serve(listener) - - defer close(stopped) - - switch { - case !*userInitiated && err != nil: - panic(err) - case *userInitiated: - stopped <- nil - default: - stopped <- err - } - }() - return stop, stopped, nil -} diff --git a/rpc/server/server.go b/rpc/server/server.go new file mode 100644 index 000000000..29eda4f0c --- /dev/null +++ b/rpc/server/server.go @@ -0,0 +1,68 @@ +package server + +import ( + "fmt" + log "github.com/Sirupsen/logrus" + "gopkg.in/tylerb/graceful.v1" + "net" + "net/http" + "time" + + "github.com/gorilla/handlers" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" +) + +// Stoppable support proactive stopping, and blocking until stopped. +type Stoppable interface { + Stop() + AwaitStopped() +} + +type stoppableServer struct { + server *graceful.Server +} + +func (s *stoppableServer) Stop() { + s.server.Stop(10 * time.Second) +} + +func (s *stoppableServer) AwaitStopped() { + <-s.server.StopChan() +} + +// StartPluginAtPath starts an HTTP server listening on a unix socket at the specified path. +// Returns a Stoppable that can be used to stop or block on the server. 
+func StartPluginAtPath(socketPath string, receiver interface{}) (Stoppable, error) { + server := rpc.NewServer() + server.RegisterCodec(json.NewCodec(), "application/json") + + if err := server.RegisterService(receiver, ""); err != nil { + return nil, err + } + + httpLog := log.New() + httpLog.Level = log.GetLevel() + + handler := handlers.LoggingHandler(httpLog.WriterLevel(log.DebugLevel), server) + gracefulServer := graceful.Server{ + Timeout: 10 * time.Second, + Server: &http.Server{Addr: fmt.Sprintf("unix://%s", socketPath), Handler: handler}, + } + + listener, err := net.Listen("unix", socketPath) + if err != nil { + return nil, err + } + + log.Infof("Listening at: %s", socketPath) + + go func() { + err := gracefulServer.Serve(listener) + if err != nil { + log.Warn(err) + } + }() + + return &stoppableServer{server: &gracefulServer}, nil +} diff --git a/rpc/server_test.go b/rpc/server/server_test.go similarity index 84% rename from rpc/server_test.go rename to rpc/server/server_test.go index f1c0b23ff..cfcd46955 100644 --- a/rpc/server_test.go +++ b/rpc/server/server_test.go @@ -1,4 +1,4 @@ -package rpc +package server import ( "encoding/json" @@ -41,11 +41,10 @@ func TestUnixSocketServer(t *testing.T) { service := plugin_rpc.PluginServer(mock) socket := filepath.Join(os.TempDir(), fmt.Sprintf("%d.sock", time.Now().Unix())) - stop, errors, err := StartPluginAtPath(socket, service) + server, err := StartPluginAtPath(socket, service) require.NoError(t, err) - c, err := plugin_rpc.NewClient("unix", socket) - require.NoError(t, err) + c := plugin_rpc.NewClient(socket) err = c.Validate(properties) require.Error(t, err) @@ -55,9 +54,5 @@ func TestUnixSocketServer(t *testing.T) { require.NoError(t, err) require.Equal(t, instanceID, *id) - // Now we stop the server - close(stop) - - // We shouldn't block here. - <-errors + server.Stop() } diff --git a/vendor.conf b/vendor.conf index 2e4608b79..d0e901679 100644 --- a/vendor.conf +++ b/vendor.conf @@ -2,33 +2,31 @@ # Install trash with `go get github.com/rancher/trash` # Synchronize the vendor/ directory with `vendor.conf` by running `trash`. -# Update dependencies by running `trash -u`. +# Update dependencies by running `trash -u`. -# Local package. 
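For reference, the server above boils down to this pattern: the gorilla rpc server is an ordinary http.Handler, graceful.Server wraps a plain http.Server (Addr is informational only, since Serve is handed an explicit unix listener), Stop(timeout) begins draining connections, and StopChan() is what AwaitStopped blocks on. A self-contained sketch with a placeholder handler and socket name:

package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"time"

	"gopkg.in/tylerb/graceful.v1"
)

func main() {
	socket := filepath.Join(os.TempDir(), "graceful-demo.sock")
	defer os.Remove(socket)

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})

	srv := graceful.Server{
		Timeout: 10 * time.Second,
		Server:  &http.Server{Addr: fmt.Sprintf("unix://%s", socket), Handler: handler},
	}

	listener, err := net.Listen("unix", socket)
	if err != nil {
		log.Fatal(err)
	}

	go func() {
		// Serve blocks until the listener is closed or a stop is requested.
		if err := srv.Serve(listener); err != nil {
			log.Println(err)
		}
	}()

	time.Sleep(100 * time.Millisecond) // demo only: let Serve get going

	// This pair is what the Stoppable's Stop() and AwaitStopped() translate to.
	srv.Stop(10 * time.Second)
	<-srv.StopChan()
}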
+# package github.com/docker/infrakit -github.com/Microsoft/go-winio v0.3.5-2-gce2922f -github.com/Sirupsen/logrus v0.10.0-38-g3ec0642 -github.com/davecgh/go-spew v1.0.0-3-g6d21280 -github.com/docker/distribution docs-v2.4.1-2016-06-28-164-g8234784 -github.com/docker/docker docs-v1.12.0-rc4-2016-07-15-2199-g3b0660d -github.com/docker/go-connections v0.2.1-4-g988efe9 -github.com/docker/go-units v0.3.1-6-gf2145db +github.com/Microsoft/go-winio 0.3.6 +github.com/Sirupsen/logrus v0.11.0-5-gabc6f20 +github.com/davecgh/go-spew v1.0.0-9-g346938d +github.com/docker/distribution v2.6.0-rc.1-4-g7694c31 +github.com/docker/docker docs-v1.12.0-rc4-2016-07-15-3221-gccba791 +github.com/docker/go-connections v0.2.1-10-gf512407 +github.com/docker/go-units v0.3.1-8-g8a7beac github.com/golang/mock bd3c8e8 -github.com/gorilla/context v1.1-7-g08b5f42 -github.com/gorilla/mux v1.1-27-g757bef9 +github.com/gorilla/handlers v1.1-12-ge1b2144 +github.com/gorilla/rpc 22c016f github.com/inconshreveable/mousetrap 76626ae -github.com/kr/fs 2788f0d github.com/nightlyone/lockfile 1d49c98 -github.com/opencontainers/runc v1.0.0-rc2-48-g88b4c48 -github.com/pkg/errors v0.8.0-1-g839d9e9 -github.com/pkg/sftp 4d0e916 +github.com/opencontainers/runc v1.0.0-rc2-137-g43c4300 +github.com/pkg/errors v0.8.0-2-g248dadf github.com/pmezard/go-difflib v1.0.0 -github.com/spf13/afero 52e4a6c -github.com/spf13/cobra 856b96d -github.com/spf13/pflag dabebe2 +github.com/spf13/afero 06b7e5f +github.com/spf13/cobra 6e91dde +github.com/spf13/pflag 5ccb023 github.com/stretchr/testify v1.1.4-4-g976c720 -golang.org/x/crypto 9e9c7d4 -golang.org/x/net a625e39 -golang.org/x/sys 002cbb5 -golang.org/x/text a35b82c +golang.org/x/net 0dd7c8d +golang.org/x/sys b699b70 +golang.org/x/text a263ba8 +gopkg.in/tylerb/graceful.v1 v1.2.13 From 99b03c76b39f31513b106a423e1b5aa00eff643b Mon Sep 17 00:00:00 2001 From: Bill Farner Date: Tue, 15 Nov 2016 07:33:30 -0800 Subject: [PATCH 2/2] Vendor updates Signed-off-by: Bill Farner --- .../Microsoft/go-winio/privilege.go | 17 +- vendor/github.com/Sirupsen/logrus/.travis.yml | 9 +- vendor/github.com/Sirupsen/logrus/README.md | 5 + .../Sirupsen/logrus/text_formatter.go | 11 +- vendor/github.com/davecgh/go-spew/LICENSE | 2 +- vendor/github.com/davecgh/go-spew/README.md | 21 +- .../github.com/davecgh/go-spew/spew/bypass.go | 2 +- .../davecgh/go-spew/spew/bypasssafe.go | 2 +- .../github.com/davecgh/go-spew/spew/common.go | 2 +- .../github.com/davecgh/go-spew/spew/config.go | 11 +- vendor/github.com/davecgh/go-spew/spew/doc.go | 11 +- .../github.com/davecgh/go-spew/spew/dump.go | 8 +- .../github.com/davecgh/go-spew/spew/format.go | 2 +- .../github.com/davecgh/go-spew/spew/spew.go | 2 +- vendor/github.com/docker/distribution/AUTHORS | 35 + .../docker/distribution/CHANGELOG.md | 81 +- .../github.com/docker/distribution/Dockerfile | 2 +- .../docker/distribution/Jenkinsfile | 8 - .../github.com/docker/distribution/Makefile | 2 +- .../docker/distribution/RELEASE-CHECKLIST.md | 36 + .../github.com/docker/distribution/circle.yml | 2 +- .../docker/distribution/reference/helpers.go | 12 + .../distribution/reference/normalize.go | 22 + .../distribution/reference/reference.go | 30 + vendor/github.com/docker/docker/.gitignore | 1 + vendor/github.com/docker/docker/CHANGELOG.md | 263 
+- .../github.com/docker/docker/CONTRIBUTING.md | 2 +- vendor/github.com/docker/docker/Dockerfile | 25 +- .../docker/docker/Dockerfile.aarch64 | 18 +- .../github.com/docker/docker/Dockerfile.armhf | 13 +- .../docker/docker/Dockerfile.ppc64le | 13 +- .../github.com/docker/docker/Dockerfile.s390x | 13 +- .../docker/docker/Dockerfile.simple | 13 +- .../docker/docker/Dockerfile.solaris | 20 + .../docker/docker/Dockerfile.windows | 223 +- vendor/github.com/docker/docker/MAINTAINERS | 18 + vendor/github.com/docker/docker/Makefile | 14 +- vendor/github.com/docker/docker/VERSION | 2 +- .../docker/docker/api/swagger-gen.yaml | 12 + .../github.com/docker/docker/api/swagger.yaml | 7240 +++++++++++++++++ .../docker/docker/api/types/client.go | 81 +- .../api/types/container/container_create.go | 21 + .../api/types/container/container_update.go | 17 + .../api/types/container/container_wait.go | 17 + .../docker/api/types/container/host_config.go | 7 +- .../docker/api/types/container/secret.go | 14 + .../docker/docker/api/types/error_response.go | 13 + .../docker/docker/api/types/errors.go | 6 - .../docker/docker/api/types/id_response.go | 13 + .../docker/docker/api/types/image_summary.go | 49 + .../docker/docker/api/types/mount/mount.go | 51 +- .../docker/api/types/network/network.go | 6 + .../docker/docker/api/types/plugin.go | 275 +- .../docker/docker/api/types/plugin_device.go | 25 + .../docker/docker/api/types/plugin_env.go | 25 + .../docker/api/types/plugin_interface_type.go | 21 + .../docker/docker/api/types/plugin_mount.go | 37 + .../docker/api/types/plugin_responses.go | 64 + .../docker/docker/api/types/port.go | 23 + .../docker/api/types/registry/authenticate.go | 21 + .../docker/docker/api/types/stats.go | 3 + .../docker/api/types/swarm/container.go | 46 +- .../docker/docker/api/types/swarm/network.go | 15 + .../docker/docker/api/types/swarm/node.go | 3 +- .../docker/docker/api/types/swarm/secret.go | 33 + .../docker/docker/api/types/swarm/swarm.go | 43 +- .../docker/docker/api/types/swarm/task.go | 11 + .../docker/docker/api/types/types.go | 176 +- .../docker/docker/api/types/volume.go | 58 + .../docker/api/types/volume/volumes_create.go | 29 + .../docker/api/types/volume/volumes_list.go | 23 + .../docker/docker/client/checkpoint_delete.go | 12 +- .../docker/docker/client/checkpoint_list.go | 10 +- .../github.com/docker/docker/client/client.go | 32 +- .../docker/docker/client/container_attach.go | 3 + .../docker/docker/client/container_commit.go | 8 +- .../docker/docker/client/container_create.go | 10 +- .../docker/docker/client/container_exec.go | 9 +- .../docker/docker/client/container_list.go | 4 +- .../docker/docker/client/container_prune.go | 4 + .../docker/docker/client/container_start.go | 3 + .../docker/docker/client/container_stats.go | 2 +- .../docker/docker/client/container_update.go | 5 +- .../docker/docker/client/container_wait.go | 6 +- .../github.com/docker/docker/client/errors.go | 33 + .../github.com/docker/docker/client/hijack.go | 7 +- .../docker/docker/client/image_build.go | 23 +- .../docker/docker/client/image_list.go | 8 +- .../docker/docker/client/image_prune.go | 4 + .../docker/docker/client/interface.go | 47 +- .../docker/client/interface_experimental.go | 26 +- .../docker/docker/client/interface_stable.go | 3 +- .../github.com/docker/docker/client/login.go | 9 +- .../docker/docker/client/network_prune.go | 26 + .../docker/docker/client/node_list.go | 4 +- .../github.com/docker/docker/client/ping.go | 30 
+ .../docker/docker/client/plugin_create.go | 26 + .../docker/docker/client/plugin_disable.go | 2 - .../docker/docker/client/plugin_enable.go | 2 - .../docker/docker/client/plugin_inspect.go | 2 - .../docker/docker/client/plugin_install.go | 10 +- .../docker/docker/client/plugin_list.go | 2 - .../docker/docker/client/plugin_push.go | 2 - .../docker/docker/client/plugin_remove.go | 2 - .../docker/docker/client/plugin_set.go | 2 - .../docker/docker/client/request.go | 95 +- .../docker/docker/client/secret_create.go | 24 + .../docker/docker/client/secret_inspect.go | 34 + .../docker/docker/client/secret_list.go | 35 + .../docker/docker/client/secret_remove.go | 10 + .../docker/docker/client/service_list.go | 4 +- .../docker/docker/client/service_logs.go | 52 + .../docker/client/swarm_get_unlock_key.go | 21 + .../docker/docker/client/swarm_unlock.go | 17 + .../docker/docker/client/swarm_update.go | 1 + .../docker/docker/client/task_list.go | 4 +- .../github.com/docker/docker/client/utils.go | 15 + .../docker/docker/client/volume_create.go | 3 +- .../docker/docker/client/volume_list.go | 6 +- .../docker/docker/client/volume_prune.go | 4 + .../docker/docker/client/volume_remove.go | 7 +- .../docker/docker/pkg/tlsconfig/config.go | 132 - vendor/github.com/docker/docker/poule.yml | 88 + vendor/github.com/docker/docker/vendor.conf | 140 + .../docker/go-connections/nat/nat.go | 49 +- .../docker/go-connections/nat/parse.go | 1 + .../go-connections/sockets/tcp_socket.go | 2 +- .../go-connections/tlsconfig/certpool_go17.go | 21 + .../tlsconfig/certpool_other.go | 16 + .../docker/go-connections/tlsconfig/config.go | 7 +- vendor/github.com/docker/go-units/duration.go | 2 +- .../github.com/gorilla/handlers/.travis.yml | 18 + vendor/github.com/gorilla/handlers/LICENSE | 22 + vendor/github.com/gorilla/handlers/README.md | 53 + .../github.com/gorilla/handlers/canonical.go | 74 + .../github.com/gorilla/handlers/compress.go | 148 + vendor/github.com/gorilla/handlers/cors.go | 317 + vendor/github.com/gorilla/handlers/doc.go | 9 + .../github.com/gorilla/handlers/handlers.go | 403 + .../gorilla/handlers/proxy_headers.go | 120 + .../github.com/gorilla/handlers/recovery.go | 86 + vendor/github.com/gorilla/rpc/.gitignore | 1 + vendor/github.com/gorilla/rpc/.travis.yml | 19 + .../crypto => github.com/gorilla/rpc}/LICENSE | 8 +- vendor/github.com/gorilla/rpc/README.md | 7 + .../{kr/fs => gorilla/rpc/v2}/LICENSE | 8 +- vendor/github.com/gorilla/rpc/v2/README.md | 6 + .../gorilla/rpc/v2/compression_selector.go | 90 + vendor/github.com/gorilla/rpc/v2/doc.go | 81 + .../gorilla/rpc/v2/encoder_selector.go | 43 + .../github.com/gorilla/rpc/v2/json/client.go | 61 + vendor/github.com/gorilla/rpc/v2/json/doc.go | 58 + .../github.com/gorilla/rpc/v2/json/server.go | 155 + vendor/github.com/gorilla/rpc/v2/map.go | 164 + vendor/github.com/gorilla/rpc/v2/server.go | 164 + vendor/github.com/kr/fs/Readme | 3 - vendor/github.com/kr/fs/filesystem.go | 36 - vendor/github.com/kr/fs/walk.go | 95 - .../github.com/opencontainers/runc/Dockerfile | 1 + .../github.com/opencontainers/runc/Makefile | 2 +- .../github.com/opencontainers/runc/README.md | 11 +- .../runc/libcontainer/README.md | 6 + vendor/github.com/pkg/errors/.travis.yml | 4 +- vendor/github.com/pkg/sftp/.gitignore | 8 - vendor/github.com/pkg/sftp/.travis.yml | 25 - vendor/github.com/pkg/sftp/CONTRIBUTORS | 2 - 
vendor/github.com/pkg/sftp/LICENSE | 9 - vendor/github.com/pkg/sftp/README.md | 27 - vendor/github.com/pkg/sftp/attrs.go | 237 - vendor/github.com/pkg/sftp/attrs_stubs.go | 11 - vendor/github.com/pkg/sftp/attrs_unix.go | 17 - vendor/github.com/pkg/sftp/client.go | 1128 --- vendor/github.com/pkg/sftp/conn.go | 122 - vendor/github.com/pkg/sftp/debug.go | 9 - vendor/github.com/pkg/sftp/packet.go | 901 -- vendor/github.com/pkg/sftp/release.go | 5 - vendor/github.com/pkg/sftp/server.go | 607 -- .../pkg/sftp/server_statvfs_darwin.go | 21 - .../pkg/sftp/server_statvfs_impl.go | 25 - .../pkg/sftp/server_statvfs_linux.go | 22 - .../pkg/sftp/server_statvfs_stubs.go | 11 - vendor/github.com/pkg/sftp/server_stubs.go | 12 - vendor/github.com/pkg/sftp/server_unix.go | 143 - vendor/github.com/pkg/sftp/sftp.go | 217 - vendor/github.com/spf13/afero/README.md | 10 +- vendor/github.com/spf13/afero/afero.go | 2 +- vendor/github.com/spf13/afero/sftp.go | 128 - vendor/github.com/spf13/afero/sftp/file.go | 95 - vendor/github.com/spf13/afero/sftp_test_go | 286 - vendor/github.com/spf13/pflag/.travis.yml | 3 +- vendor/golang.org/x/crypto/.gitattributes | 10 - vendor/golang.org/x/crypto/.gitignore | 2 - vendor/golang.org/x/crypto/AUTHORS | 3 - vendor/golang.org/x/crypto/CONTRIBUTING.md | 31 - vendor/golang.org/x/crypto/CONTRIBUTORS | 3 - vendor/golang.org/x/crypto/PATENTS | 22 - vendor/golang.org/x/crypto/README | 3 - vendor/golang.org/x/crypto/codereview.cfg | 1 - .../x/crypto/curve25519/const_amd64.s | 20 - .../x/crypto/curve25519/cswap_amd64.s | 88 - .../x/crypto/curve25519/curve25519.go | 841 -- vendor/golang.org/x/crypto/curve25519/doc.go | 23 - .../x/crypto/curve25519/freeze_amd64.s | 94 - .../x/crypto/curve25519/ladderstep_amd64.s | 1398 ---- .../x/crypto/curve25519/mont25519_amd64.go | 240 - .../x/crypto/curve25519/mul_amd64.s | 191 - .../x/crypto/curve25519/square_amd64.s | 153 - vendor/golang.org/x/crypto/ed25519/ed25519.go | 181 - .../ed25519/internal/edwards25519/const.go | 1422 ---- .../internal/edwards25519/edwards25519.go | 1771 ---- vendor/golang.org/x/crypto/ssh/buffer.go | 98 - vendor/golang.org/x/crypto/ssh/certs.go | 503 -- vendor/golang.org/x/crypto/ssh/channel.go | 633 -- vendor/golang.org/x/crypto/ssh/cipher.go | 579 -- vendor/golang.org/x/crypto/ssh/client.go | 213 - vendor/golang.org/x/crypto/ssh/client_auth.go | 473 -- vendor/golang.org/x/crypto/ssh/common.go | 356 - vendor/golang.org/x/crypto/ssh/connection.go | 143 - vendor/golang.org/x/crypto/ssh/doc.go | 18 - vendor/golang.org/x/crypto/ssh/handshake.go | 460 -- vendor/golang.org/x/crypto/ssh/kex.go | 540 -- vendor/golang.org/x/crypto/ssh/keys.go | 880 -- vendor/golang.org/x/crypto/ssh/mac.go | 57 - vendor/golang.org/x/crypto/ssh/messages.go | 758 -- vendor/golang.org/x/crypto/ssh/mux.go | 330 - vendor/golang.org/x/crypto/ssh/server.go | 488 -- vendor/golang.org/x/crypto/ssh/session.go | 627 -- vendor/golang.org/x/crypto/ssh/tcpip.go | 407 - vendor/golang.org/x/crypto/ssh/transport.go | 333 - vendor/golang.org/x/sys/unix/syscall_linux.go | 3 +- .../x/sys/unix/zsyscall_linux_386.go | 15 +- .../x/sys/unix/zsyscall_linux_amd64.go | 15 +- .../x/sys/unix/zsyscall_linux_arm.go | 15 +- .../x/sys/unix/zsyscall_linux_arm64.go | 15 +- .../x/sys/unix/zsyscall_linux_mips64.go | 15 +- .../x/sys/unix/zsyscall_linux_mips64le.go | 15 +- .../x/sys/unix/zsyscall_linux_ppc64.go | 15 +- .../x/sys/unix/zsyscall_linux_ppc64le.go | 15 +- 
.../x/sys/unix/zsyscall_linux_s390x.go | 15 +- .../x/sys/unix/zsyscall_linux_sparc64.go | 15 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 11 +- .../x/text/unicode/norm/readwriter.go | 1 - vendor/gopkg.in/tylerb/graceful.v1/.gitignore | 23 + .../gopkg.in/tylerb/graceful.v1/.travis.yml | 13 + vendor/gopkg.in/tylerb/graceful.v1/LICENSE | 21 + vendor/gopkg.in/tylerb/graceful.v1/README.md | 152 + .../gopkg.in/tylerb/graceful.v1/graceful.go | 487 ++ .../tylerb/graceful.v1/keepalive_listener.go | 32 + .../tylerb/graceful.v1/limit_listen.go | 77 + 249 files changed, 12928 insertions(+), 19410 deletions(-) delete mode 100644 vendor/github.com/docker/distribution/Jenkinsfile create mode 100644 vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md create mode 100644 vendor/github.com/docker/distribution/reference/helpers.go create mode 100644 vendor/github.com/docker/distribution/reference/normalize.go create mode 100644 vendor/github.com/docker/docker/Dockerfile.solaris create mode 100644 vendor/github.com/docker/docker/api/swagger-gen.yaml create mode 100644 vendor/github.com/docker/docker/api/swagger.yaml create mode 100644 vendor/github.com/docker/docker/api/types/container/container_create.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_update.go create mode 100644 vendor/github.com/docker/docker/api/types/container/container_wait.go create mode 100644 vendor/github.com/docker/docker/api/types/container/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/error_response.go delete mode 100644 vendor/github.com/docker/docker/api/types/errors.go create mode 100644 vendor/github.com/docker/docker/api/types/id_response.go create mode 100644 vendor/github.com/docker/docker/api/types/image_summary.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_device.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_env.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_interface_type.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_mount.go create mode 100644 vendor/github.com/docker/docker/api/types/plugin_responses.go create mode 100644 vendor/github.com/docker/docker/api/types/port.go create mode 100644 vendor/github.com/docker/docker/api/types/registry/authenticate.go create mode 100644 vendor/github.com/docker/docker/api/types/swarm/secret.go create mode 100644 vendor/github.com/docker/docker/api/types/volume.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volumes_create.go create mode 100644 vendor/github.com/docker/docker/api/types/volume/volumes_list.go create mode 100644 vendor/github.com/docker/docker/client/network_prune.go create mode 100644 vendor/github.com/docker/docker/client/ping.go create mode 100644 vendor/github.com/docker/docker/client/plugin_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_create.go create mode 100644 vendor/github.com/docker/docker/client/secret_inspect.go create mode 100644 vendor/github.com/docker/docker/client/secret_list.go create mode 100644 vendor/github.com/docker/docker/client/secret_remove.go create mode 100644 vendor/github.com/docker/docker/client/service_logs.go create mode 100644 vendor/github.com/docker/docker/client/swarm_get_unlock_key.go create mode 100644 
vendor/github.com/docker/docker/client/swarm_unlock.go create mode 100644 vendor/github.com/docker/docker/client/utils.go delete mode 100644 vendor/github.com/docker/docker/pkg/tlsconfig/config.go create mode 100644 vendor/github.com/docker/docker/poule.yml create mode 100644 vendor/github.com/docker/docker/vendor.conf create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go create mode 100644 vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go create mode 100644 vendor/github.com/gorilla/handlers/.travis.yml create mode 100644 vendor/github.com/gorilla/handlers/LICENSE create mode 100644 vendor/github.com/gorilla/handlers/README.md create mode 100644 vendor/github.com/gorilla/handlers/canonical.go create mode 100644 vendor/github.com/gorilla/handlers/compress.go create mode 100644 vendor/github.com/gorilla/handlers/cors.go create mode 100644 vendor/github.com/gorilla/handlers/doc.go create mode 100644 vendor/github.com/gorilla/handlers/handlers.go create mode 100644 vendor/github.com/gorilla/handlers/proxy_headers.go create mode 100644 vendor/github.com/gorilla/handlers/recovery.go create mode 100644 vendor/github.com/gorilla/rpc/.gitignore create mode 100644 vendor/github.com/gorilla/rpc/.travis.yml rename vendor/{golang.org/x/crypto => github.com/gorilla/rpc}/LICENSE (83%) create mode 100644 vendor/github.com/gorilla/rpc/README.md rename vendor/github.com/{kr/fs => gorilla/rpc/v2}/LICENSE (83%) create mode 100644 vendor/github.com/gorilla/rpc/v2/README.md create mode 100644 vendor/github.com/gorilla/rpc/v2/compression_selector.go create mode 100644 vendor/github.com/gorilla/rpc/v2/doc.go create mode 100644 vendor/github.com/gorilla/rpc/v2/encoder_selector.go create mode 100644 vendor/github.com/gorilla/rpc/v2/json/client.go create mode 100644 vendor/github.com/gorilla/rpc/v2/json/doc.go create mode 100644 vendor/github.com/gorilla/rpc/v2/json/server.go create mode 100644 vendor/github.com/gorilla/rpc/v2/map.go create mode 100644 vendor/github.com/gorilla/rpc/v2/server.go delete mode 100644 vendor/github.com/kr/fs/Readme delete mode 100644 vendor/github.com/kr/fs/filesystem.go delete mode 100644 vendor/github.com/kr/fs/walk.go delete mode 100644 vendor/github.com/pkg/sftp/.gitignore delete mode 100644 vendor/github.com/pkg/sftp/.travis.yml delete mode 100644 vendor/github.com/pkg/sftp/CONTRIBUTORS delete mode 100644 vendor/github.com/pkg/sftp/LICENSE delete mode 100644 vendor/github.com/pkg/sftp/README.md delete mode 100644 vendor/github.com/pkg/sftp/attrs.go delete mode 100644 vendor/github.com/pkg/sftp/attrs_stubs.go delete mode 100644 vendor/github.com/pkg/sftp/attrs_unix.go delete mode 100644 vendor/github.com/pkg/sftp/client.go delete mode 100644 vendor/github.com/pkg/sftp/conn.go delete mode 100644 vendor/github.com/pkg/sftp/debug.go delete mode 100644 vendor/github.com/pkg/sftp/packet.go delete mode 100644 vendor/github.com/pkg/sftp/release.go delete mode 100644 vendor/github.com/pkg/sftp/server.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_darwin.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_impl.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_linux.go delete mode 100644 vendor/github.com/pkg/sftp/server_statvfs_stubs.go delete 
mode 100644 vendor/github.com/pkg/sftp/server_stubs.go delete mode 100644 vendor/github.com/pkg/sftp/server_unix.go delete mode 100644 vendor/github.com/pkg/sftp/sftp.go delete mode 100644 vendor/github.com/spf13/afero/sftp.go delete mode 100644 vendor/github.com/spf13/afero/sftp/file.go delete mode 100644 vendor/github.com/spf13/afero/sftp_test_go delete mode 100644 vendor/golang.org/x/crypto/.gitattributes delete mode 100644 vendor/golang.org/x/crypto/.gitignore delete mode 100644 vendor/golang.org/x/crypto/AUTHORS delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTING.md delete mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS delete mode 100644 vendor/golang.org/x/crypto/PATENTS delete mode 100644 vendor/golang.org/x/crypto/README delete mode 100644 vendor/golang.org/x/crypto/codereview.cfg delete mode 100644 vendor/golang.org/x/crypto/curve25519/const_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/cswap_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/doc.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/freeze_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go delete mode 100644 vendor/golang.org/x/crypto/curve25519/mul_amd64.s delete mode 100644 vendor/golang.org/x/crypto/curve25519/square_amd64.s delete mode 100644 vendor/golang.org/x/crypto/ed25519/ed25519.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go delete mode 100644 vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go delete mode 100644 vendor/golang.org/x/crypto/ssh/buffer.go delete mode 100644 vendor/golang.org/x/crypto/ssh/certs.go delete mode 100644 vendor/golang.org/x/crypto/ssh/channel.go delete mode 100644 vendor/golang.org/x/crypto/ssh/cipher.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client.go delete mode 100644 vendor/golang.org/x/crypto/ssh/client_auth.go delete mode 100644 vendor/golang.org/x/crypto/ssh/common.go delete mode 100644 vendor/golang.org/x/crypto/ssh/connection.go delete mode 100644 vendor/golang.org/x/crypto/ssh/doc.go delete mode 100644 vendor/golang.org/x/crypto/ssh/handshake.go delete mode 100644 vendor/golang.org/x/crypto/ssh/kex.go delete mode 100644 vendor/golang.org/x/crypto/ssh/keys.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mac.go delete mode 100644 vendor/golang.org/x/crypto/ssh/messages.go delete mode 100644 vendor/golang.org/x/crypto/ssh/mux.go delete mode 100644 vendor/golang.org/x/crypto/ssh/server.go delete mode 100644 vendor/golang.org/x/crypto/ssh/session.go delete mode 100644 vendor/golang.org/x/crypto/ssh/tcpip.go delete mode 100644 vendor/golang.org/x/crypto/ssh/transport.go create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/.gitignore create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/.travis.yml create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/LICENSE create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/README.md create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/graceful.go create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/keepalive_listener.go create mode 100644 vendor/gopkg.in/tylerb/graceful.v1/limit_listen.go diff --git a/vendor/github.com/Microsoft/go-winio/privilege.go b/vendor/github.com/Microsoft/go-winio/privilege.go index 3d59412c7..e363eeb27 100644 --- 
a/vendor/github.com/Microsoft/go-winio/privilege.go +++ b/vendor/github.com/Microsoft/go-winio/privilege.go @@ -83,7 +83,7 @@ func RunWithPrivileges(names []string, fn func() error) error { return err } defer releaseThreadToken(token) - err = adjustPrivileges(token, privileges) + err = adjustPrivileges(token, privileges, SE_PRIVILEGE_ENABLED) if err != nil { return err } @@ -110,6 +110,15 @@ func mapPrivileges(names []string) ([]uint64, error) { // EnableProcessPrivileges enables privileges globally for the process. func EnableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, SE_PRIVILEGE_ENABLED) +} + +// DisableProcessPrivileges disables privileges globally for the process. +func DisableProcessPrivileges(names []string) error { + return enableDisableProcessPrivilege(names, 0) +} + +func enableDisableProcessPrivilege(names []string, action uint32) error { privileges, err := mapPrivileges(names) if err != nil { return err @@ -123,15 +132,15 @@ func EnableProcessPrivileges(names []string) error { } defer token.Close() - return adjustPrivileges(token, privileges) + return adjustPrivileges(token, privileges, action) } -func adjustPrivileges(token windows.Token, privileges []uint64) error { +func adjustPrivileges(token windows.Token, privileges []uint64, action uint32) error { var b bytes.Buffer binary.Write(&b, binary.LittleEndian, uint32(len(privileges))) for _, p := range privileges { binary.Write(&b, binary.LittleEndian, p) - binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED)) + binary.Write(&b, binary.LittleEndian, action) } prevState := make([]byte, b.Len()) reqSize := uint32(0) diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml index dee4eb2cc..ec7dd7833 100644 --- a/vendor/github.com/Sirupsen/logrus/.travis.yml +++ b/vendor/github.com/Sirupsen/logrus/.travis.yml @@ -1,10 +1,9 @@ language: go go: - - 1.3 - - 1.4 - - 1.5 - 1.6 + - 1.7 - tip install: - - go get -t ./... -script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... + - go get -t $(go list ./... | grep -v /examples/) +script: + - GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v $(go list ./... | grep -v /examples/) diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md index ab48929df..f9cfb0a5d 100644 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/Sirupsen/logrus/README.md @@ -228,8 +228,12 @@ Note: Syslog hook also support connecting to local syslog (Ex. 
"/dev/log" or "/v | [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | | [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| | [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| +| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| | [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | +| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | | [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | +| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | +| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | #### Level logging @@ -367,6 +371,7 @@ entries. It should not be a feature of the application-level logger. | Tool | Description | | ---- | ----------- | |[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| +|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper arround Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). 
[sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) | #### Testing diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go index cce61f2a0..9114b3ca4 100644 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -122,7 +122,8 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin } for _, k := range keys { v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) } } @@ -142,7 +143,11 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf b.WriteString(key) b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { switch value := value.(type) { case string: if !needsQuoting(value) { @@ -155,11 +160,9 @@ func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interf if !needsQuoting(errmsg) { b.WriteString(errmsg) } else { - fmt.Fprintf(b, "%q", value) + fmt.Fprintf(b, "%q", errmsg) } default: fmt.Fprint(b, value) } - - b.WriteByte(' ') } diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE index bb6733231..c83641619 100644 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -1,6 +1,6 @@ ISC License -Copyright (c) 2012-2013 Dave Collins +Copyright (c) 2012-2016 Dave Collins Permission to use, copy, modify, and distribute this software for any purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/README.md b/vendor/github.com/davecgh/go-spew/README.md index 556170ae6..262430449 100644 --- a/vendor/github.com/davecgh/go-spew/README.md +++ b/vendor/github.com/davecgh/go-spew/README.md @@ -1,11 +1,13 @@ go-spew ======= -[![Build Status](https://travis-ci.org/davecgh/go-spew.png?branch=master)] -(https://travis-ci.org/davecgh/go-spew) [![Coverage Status] -(https://coveralls.io/repos/davecgh/go-spew/badge.png?branch=master)] +[![Build Status](https://img.shields.io/travis/davecgh/go-spew.svg)] +(https://travis-ci.org/davecgh/go-spew) [![ISC License] +(http://img.shields.io/badge/license-ISC-blue.svg)](http://copyfree.org) [![Coverage Status] +(https://img.shields.io/coveralls/davecgh/go-spew.svg)] (https://coveralls.io/r/davecgh/go-spew?branch=master) + Go-spew implements a deep pretty printer for Go data structures to aid in debugging. A comprehensive suite of tests with 100% test coverage is provided to ensure proper functionality. See `test_coverage.txt` for the gocov coverage @@ -19,7 +21,7 @@ post about it ## Documentation -[![GoDoc](https://godoc.org/github.com/davecgh/go-spew/spew?status.png)] +[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg)] (http://godoc.org/github.com/davecgh/go-spew/spew) Full `go doc` style documentation for the project can be viewed online without @@ -160,6 +162,15 @@ options. See the ConfigState documentation for more details. App Engine or with the "safe" build tag specified. Pointer method invocation is enabled by default. +* DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. 
+ +* DisableCapacities + DisableCapacities specifies whether to disable the printing of capacities + for arrays, slices, maps and channels. This is useful when diffing data + structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. @@ -191,4 +202,4 @@ using the unsafe package. ## License -Go-spew is licensed under the liberal ISC License. +Go-spew is licensed under the [copyfree](http://copyfree.org) ISC License. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go index d42a0bc4a..8a4a6589a 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go index e47a4e795..1fe3cf3d5 100644 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -1,4 +1,4 @@ -// Copyright (c) 2015 Dave Collins +// Copyright (c) 2015-2016 Dave Collins // // Permission to use, copy, modify, and distribute this software for any // purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go index 14f02dc15..7c519ff47 100644 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go index 555282723..2e3d22f31 100644 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -67,6 +67,15 @@ type ConfigState struct { // Google App Engine or with the "safe" build tag specified. DisablePointerMethods bool + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + // ContinueOnMethod specifies whether or not recursion should continue once // a custom error or Stringer interface is invoked. 
The default, false, // means it will print the results of invoking the custom error or Stringer diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go index 5be0c4060..aacaac6f1 100644 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -91,6 +91,15 @@ The following configuration options are available: which only accept pointer receivers from non-pointer variables. Pointer method invocation is enabled by default. + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + * ContinueOnMethod Enables recursion into types after invoking error and Stringer interface methods. Recursion after method invocation is disabled by default. diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go index a0ff95e27..df1d582a7 100644 --- a/vendor/github.com/davecgh/go-spew/spew/dump.go +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above @@ -129,7 +129,7 @@ func (d *dumpState) dumpPtr(v reflect.Value) { d.w.Write(closeParenBytes) // Display pointer information. 
- if len(pointerChain) > 0 { + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { d.w.Write(openParenBytes) for i, addr := range pointerChain { if i > 0 { @@ -282,13 +282,13 @@ func (d *dumpState) dump(v reflect.Value) { case reflect.Map, reflect.String: valueLen = v.Len() } - if valueLen != 0 || valueCap != 0 { + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { d.w.Write(openParenBytes) if valueLen != 0 { d.w.Write(lenEqualsBytes) printInt(d.w, int64(valueLen), 10) } - if valueCap != 0 { + if !d.cs.DisableCapacities && valueCap != 0 { if valueLen != 0 { d.w.Write(spaceBytes) } diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go index ecf3b80e2..c49875bac 100644 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go index d8233f542..32c0e3388 100644 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -1,5 +1,5 @@ /* - * Copyright (c) 2013 Dave Collins + * Copyright (c) 2013-2016 Dave Collins * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above diff --git a/vendor/github.com/docker/distribution/AUTHORS b/vendor/github.com/docker/distribution/AUTHORS index 9e80e062b..252ff8aa2 100644 --- a/vendor/github.com/docker/distribution/AUTHORS +++ b/vendor/github.com/docker/distribution/AUTHORS @@ -1,6 +1,8 @@ +a-palchikov Aaron Lehmann Aaron Schlesinger Aaron Vinson +Adam Duke Adam Enger Adrian Mouat Ahmet Alp Balkan @@ -19,6 +21,7 @@ Anis Elleuch Anton Tiurin Antonio Mercado Antonio Murdaca +Anusha Ragunathan Arien Holthuizen Arnaud Porterie Arthur Baars @@ -26,12 +29,16 @@ Asuka Suzuki Avi Miller Ayose Cazorla BadZen +Ben Bodenmiller Ben Firshman bin liu Brian Bland burnettk Carson A +Cezar Sa Espinola +Charles Smith Chris Dillon +cuiwei13 cyli Daisuke Fujita Daniel Huhn @@ -48,11 +55,14 @@ Diogo Mónica DJ Enriquez Donald Huang Doug Davis +Edgar Lee Eric Yang +Fabio Berchtold Fabio Huser farmerworking Felix Yan Florentin Raud +Frank Chen Frederick F. 
Kautz IV gabriell nascimento Gleb Schukin @@ -64,16 +74,23 @@ HuKeping Ian Babrou igayoso Jack Griffin +James Findley Jason Freidman +Jason Heiss Jeff Nickoloff +Jess Frazelle Jessie Frazelle jhaohai Jianqing Wang +Jihoon Chung +Joao Fernandes +John Mulhausen John Starks Jon Johnson Jon Poler Jonathan Boulle Jordan Liggitt +Josh Chorlton Josh Hawn Julien Fernandez Ke Xu @@ -84,22 +101,30 @@ Kenny Leung Li Yi Liu Hua liuchang0812 +Lloyd Ramey Louis Kottmann Luke Carpenter +Marcus Martins Mary Anthony Matt Bentley Matt Duch Matt Moore Matt Robenolt +Matthew Green Michael Prokop Michal Minar +Michal Minář +Mike Brown Miquel Sabaté +Misty Stanley-Jones +Misty Stanley-Jones Morgan Bauer moxiegirl Nathan Sullivan nevermosby Nghia Tran Nikita Tarasov +Noah Treuhaft Nuutti Kotivuori Oilbeater Olivier Gambier @@ -108,17 +133,23 @@ Omer Cohen Patrick Devine Phil Estes Philip Misiowiec +Pierre-Yves Ritschard +Qiao Anran +Randy Barlow Richard Scothern Rodolfo Carvalho Rusty Conover Sean Boran Sebastiaan van Stijn +Sebastien Coavoux Serge Dubrouski Sharif Nassar Shawn Falkner-Horine Shreyas Karnik Simon Thulbourn +spacexnice Spencer Rinehart +Stan Hu Stefan Majewsky Stefan Weil Stephen J Day @@ -134,6 +165,8 @@ Tonis Tiigi Tony Holdstock-Brown Trevor Pounds Troels Thomsen +Victor Vieux +Victoria Bialas Vincent Batts Vincent Demeester Vincent Giersch @@ -142,6 +175,8 @@ weiyuan.yl xg.song xiekeyang Yann ROBERT +yaoyao.xyy +yuexiao-wang yuzou zhouhaibing089 姜继忠 diff --git a/vendor/github.com/docker/distribution/CHANGELOG.md b/vendor/github.com/docker/distribution/CHANGELOG.md index 3445c090c..dce492078 100644 --- a/vendor/github.com/docker/distribution/CHANGELOG.md +++ b/vendor/github.com/docker/distribution/CHANGELOG.md @@ -1,9 +1,80 @@ # Changelog +## 2.6.0-rc1 (2016-10-10) + +#### Storage + - S3: fixed bug in delete due to read-after-write inconsistency + - S3: allow EC2 IAM roles to be used when authorizing region endpoints + - S3: add Object ACL Support + - S3: fix delete method's notion of subpaths + - S3: use multipart upload API in `Move` method for performance + - S3: add v2 signature signing for legacy S3 clones + - Swift: add simple heuristic to detect incomplete DLOs during read ops + - Swift: support different user and tenant domains + - Swift: bulk deletes in chunks + - Aliyun OSS: fix delete method's notion of subpaths + - Aliyun OSS: optimize data copy after upload finishes + - Azure: close leaking response body + - Fix storage drivers dropping non-EOF errors when listing repositories + - Compare path properly when listing repositories in catalog + - Add a foreign layer URL host whitelist + - Improve catalog enumerate runtime + +#### Registry + - Override media type returned from `Stat()` for existing manifests + - Export `storage.CreateOptions` in top-level package + - Enable notifications to endpoints that use self-signed certificates + - Properly validate multi-URL foreign layers + - Add control over validation of URLs in pushed manifests + - Proxy mode: fix socket leak when pull is cancelled + - Tag service: properly handle error responses on HEAD request + - Support for custom authentication URL in proxying registry + - Add configuration option to disable access logging + - Add notification filtering by target media type + - Manifest: `References()` returns all children + - Honor `X-Forwarded-Port` and Forwarded headers + - Reference: Preserve tag and digest in With* functions + +#### Client + - Changes the client Tags `All()` method to follow links + - Allow 
registry clients to connect via HTTP2 + - Better handling of OAuth errors in client + +#### Spec + - Manifest: clarify relationship between urls and foreign layers + +#### Manifest + - Add plugin mediatype to distribution manifest + +#### Docs + + - Document `TOOMANYREQUESTS` error code + - Document required Let's Encrypt port + - Improve documentation around implementation of OAuth2 + +#### Auth + - Add support for registry type in scope + - Add support for using v2 ping challenges for v1 + - Add leeway to JWT `nbf` and `exp` checking + - htpasswd: dynamically parse htpasswd file + - Fix missing auth headers with PATCH HTTP request when pushing to default port + +#### Dockerfile + - Update to go1.7 + - Reorder Dockerfile steps for better layer caching + +#### Notes + +Documentation has moved to the documentation repository at +`github.com/docker/docker.github.io/tree/master/registry` + +The registry is go 1.7 compliant, and passes newer, more restrictive `lint` and `vet` ing. + + ## 2.5.0 (2016-06-14) -### Storage -- Ensure uploads directory is cleaned after upload is commited +#### Storage +- Ensure uploads directory is cleaned after upload is committed - Add ability to cap concurrent operations in filesystem driver - S3: Add 'us-gov-west-1' to the valid region list - Swift: Handle ceph not returning Last-Modified header for HEAD requests @@ -23,13 +94,13 @@ - Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported - Clarify API documentation around catalog fetch behavior -### API +#### API - Support returning HTTP 429 (Too Many Requests) -### Documentation +#### Documentation - Update auth documentation examples to show "expires in" as int -### Docker Image +#### Docker Image - Use Alpine Linux as base image diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile index bc3c78577..426954a11 100644 --- a/vendor/github.com/docker/distribution/Dockerfile +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.6-alpine +FROM golang:1.7-alpine ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution ENV DOCKER_BUILDTAGS include_oss include_gcs diff --git a/vendor/github.com/docker/distribution/Jenkinsfile b/vendor/github.com/docker/distribution/Jenkinsfile deleted file mode 100644 index fa29520b5..000000000 --- a/vendor/github.com/docker/distribution/Jenkinsfile +++ /dev/null @@ -1,8 +0,0 @@ -// Only run on Linux atm -wrappedNode(label: 'docker') { - deleteDir() - stage "checkout" - checkout scm - - documentationChecker("docs") -} diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile index 2cd5bb147..47b8f1d0b 100644 --- a/vendor/github.com/docker/distribution/Makefile +++ b/vendor/github.com/docker/distribution/Makefile @@ -13,7 +13,7 @@ endif GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" -.PHONY: clean all fmt vet lint build test binaries +.PHONY: all build binaries clean dep-restore dep-save dep-validate fmt lint test test-full vet .DEFAULT: all all: fmt vet lint build test binaries diff --git a/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md new file mode 100644 index 000000000..49235cecd --- /dev/null +++ b/vendor/github.com/docker/distribution/RELEASE-CHECKLIST.md @@ -0,0 +1,36 @@ +## Registry Release Checklist + +10. 
Compile release notes detailing features and since the last release. Update the `CHANGELOG.md` file. + +20. Update the version file: `https://github.com/docker/distribution/blob/master/version/version.go` + +30. Update the `MAINTAINERS` (if necessary), `AUTHORS` and `.mailmap` files. + + ``` +make AUTHORS +``` + +40. Create a signed tag. + + Distribution uses semantic versioning. Tags are of the format `vx.y.z[-rcn]` +You will need PGP installed and a PGP key which has been added to your Github account. The comment for the tag should include the release notes. + +50. Push the signed tag + +60. Create a new [release](https://github.com/docker/distribution/releases). In the case of a release candidate, tick the `pre-release` checkbox. + +70. Update the registry binary in [distribution library image repo](https://github.com/docker/distribution-library-image) by running the update script and opening a pull request. + +80. Update the official image. Add the new version in the [official images repo](https://github.com/docker-library/official-images) by appending a new version to the `registry/registry` file with the git hash pointed to by the signed tag. Update the major version to point to the latest version and the minor version to point to new patch release if necessary. +e.g. to release `2.3.1` + + `2.3.1 (new)` + + `2.3.0 -> 2.3.0` can be removed + + `2 -> 2.3.1` + + `2.3 -> 2.3.1` + +90. Build a new distribution/registry image on [Docker hub](https://hub.docker.com/u/distribution/dashboard) by adding a new automated build with the new tag and re-building the images. + diff --git a/vendor/github.com/docker/distribution/circle.yml b/vendor/github.com/docker/distribution/circle.yml index 52348a4bc..61f8be0cb 100644 --- a/vendor/github.com/docker/distribution/circle.yml +++ b/vendor/github.com/docker/distribution/circle.yml @@ -8,7 +8,7 @@ machine: post: # go - - gvm install go1.6 --prefer-binary --name=stable + - gvm install go1.7 --prefer-binary --name=stable environment: # Convenient shortcuts to "common" locations diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 000000000..dd7ee0ea6 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,12 @@ +package reference + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 000000000..b19a34e3b --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,22 @@ +package reference + +var ( + defaultTag = "latest" +) + +// EnsureTagged adds the default tag "latest" to a reference if it only has +// a repo name. 
+func EnsureTagged(ref Named) NamedTagged { + namedTagged, ok := ref.(NamedTagged) + if !ok { + namedTagged, err := WithTag(ref, defaultTag) + if err != nil { + // Default tag must be valid, to create a NamedTagged + // type with non-validated input the WithTag function + // should be used instead + panic(err) + } + return namedTagged + } + return namedTagged +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go index dc3af1670..02786628e 100644 --- a/vendor/github.com/docker/distribution/reference/reference.go +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -24,6 +24,7 @@ package reference import ( "errors" "fmt" + "path" "strings" "github.com/docker/distribution/digest" @@ -218,6 +219,13 @@ func WithTag(name Named, tag string) (NamedTagged, error) { if !anchoredTagRegexp.MatchString(tag) { return nil, ErrTagInvalidFormat } + if canonical, ok := name.(Canonical); ok { + return reference{ + name: name.Name(), + tag: tag, + digest: canonical.Digest(), + }, nil + } return taggedReference{ name: name.Name(), tag: tag, @@ -230,12 +238,34 @@ func WithDigest(name Named, digest digest.Digest) (Canonical, error) { if !anchoredDigestRegexp.MatchString(digest.String()) { return nil, ErrDigestInvalidFormat } + if tagged, ok := name.(Tagged); ok { + return reference{ + name: name.Name(), + tag: tagged.Tag(), + digest: digest, + }, nil + } return canonicalReference{ name: name.Name(), digest: digest, }, nil } +// Match reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func Match(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, ref.String()) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, namedRef.Name()) + } + return matched, err +} + +// TrimNamed removes any tag or digest from the named reference. +func TrimNamed(ref Named) Named { + return repository(ref.Name()) +} + func getBestReferenceType(ref reference) Reference { if ref.name == "" { // Allow digest only references diff --git a/vendor/github.com/docker/docker/.gitignore b/vendor/github.com/docker/docker/.gitignore index 0f082f529..1e4d86401 100644 --- a/vendor/github.com/docker/docker/.gitignore +++ b/vendor/github.com/docker/docker/.gitignore @@ -9,6 +9,7 @@ .DS_Store # a .bashrc may be added to customize the build environment .bashrc +.editorconfig .gopath/ .go-pkg-cache/ autogen/ diff --git a/vendor/github.com/docker/docker/CHANGELOG.md b/vendor/github.com/docker/docker/CHANGELOG.md index 195cf9739..a3765181a 100644 --- a/vendor/github.com/docker/docker/CHANGELOG.md +++ b/vendor/github.com/docker/docker/CHANGELOG.md @@ -5,6 +5,265 @@ information on the list of deprecated flags and APIs please have a look at https://docs.docker.com/engine/deprecated/ where target removal dates can also be found. +## 1.13.0 (2016-12-08) + +### Builder ++ Add capability to specify images used as a cache source on build. 
These images do not need to have local parent chain and can be pulled from other registries [#26839](https://github.com/docker/docker/pull/26839) ++ (experimental) Add option to squash image layers to the FROM image after successful builds [#22641](https://github.com/docker/docker/pull/22641) +* Fix dockerfile parser with empty line after escape [#24725](https://github.com/docker/docker/pull/24725) +- Add step number on `docker build` [#24978](https://github.com/docker/docker/pull/24978) ++ Add support for compressing build context during image build [#25837](https://github.com/docker/docker/pull/25837) ++ add `--network` to `docker build` [#27702](https://github.com/docker/docker/pull/27702) +- Fix inconsistent behavior between `--label` flag on `docker build` and `docker run` [#26027](https://github.com/docker/docker/issues/26027) +- Fix image layer inconsistencies when using the overlay storage driver [#27209](https://github.com/docker/docker/pull/27209) +* Unused build-args are now allowed. A warning is presented instead of an error and failed build [#27412](https://github.com/docker/docker/pull/27412) +- Fix builder cache issue on Windows [#27805](https://github.com/docker/docker/pull/27805) + +### Contrib ++ Add support for building docker debs for Ubuntu Xenial on PPC64 [#23438](https://github.com/docker/docker/pull/23438) ++ Add support for building docker debs for Ubuntu Xenial on s390x [#26104](https://github.com/docker/docker/pull/26104) +- Add RPM builder for VMWare Photon OS [#24116](https://github.com/docker/docker/pull/24116) ++ Add shell completions to tgz [#27735](https://github.com/docker/docker/pull/27735) +* Update the install script to allow using the mirror in China [#27005](https://github.com/docker/docker/pull/27005) ++ Add DEB builder for Ubuntu 16.10 Yakkety Yak [#27993](https://github.com/docker/docker/pull/27993) ++ Add RPM builder for Fedora 25 [#28222](https://github.com/docker/docker/pull/28222) + +### Distribution + +* Update notary dependency to 0.4.2 (full changelogs [here](https://github.com/docker/notary/releases/tag/v0.4.2)) [#27074](https://github.com/docker/docker/pull/27074) + - Support for compilation on windows [docker/notary#970](https://github.com/docker/notary/pull/970) + - Improved error messages for client authentication errors [docker/notary#972](https://github.com/docker/notary/pull/972) + - Support for finding keys that are anywhere in the `~/.docker/trust/private` directory, not just under `~/.docker/trust/private/root_keys` or `~/.docker/trust/private/tuf_keys` [docker/notary#981](https://github.com/docker/notary/pull/981) + - Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. 
[docker/notary#982](https://github.com/docker/notary/pull/982) + - Improve root validation and yubikey debug logging [docker/notary#858](https://github.com/docker/notary/pull/858) [docker/notary#891](https://github.com/docker/notary/pull/891) + - Warn if certificates for root or delegations are near expiry [docker/notary#802](https://github.com/docker/notary/pull/802) + - Warn if role metadata is near expiry [docker/notary#786](https://github.com/docker/notary/pull/786) + - Fix passphrase retrieval attempt counting and terminal detection [docker/notary#906](https://github.com/docker/notary/pull/906) +- Avoid unnecessary blob uploads when different users push same layers to authenticated registry [#26564](https://github.com/docker/docker/pull/26564) +* Allow external storage for registry credentials [#26354](https://github.com/docker/docker/pull/26354) + +### Logging + +* Standardize the default logging tag value in all logging drivers [#22911](https://github.com/docker/docker/pull/22911) +- Improve performance and memory use when logging of long log lines [#22982](https://github.com/docker/docker/pull/22982) ++ Enable syslog driver for windows [#25736](https://github.com/docker/docker/pull/25736) ++ Add Logentries Driver [#27471](https://github.com/docker/docker/pull/27471) ++ Update of AWS log driver to support tags [#27707](https://github.com/docker/docker/pull/27707) ++ Unix socket support for fluentd [#26088](https://github.com/docker/docker/pull/26088) +* Enable fluentd logging driver on Windows [#28189](https://github.com/docker/docker/pull/28189) +- Sanitize docker labels when used as journald field names [#23725](https://github.com/docker/docker/pull/23725) + +### Networking + ++ Add `--attachable` network support to enable `docker run` to work in swarm-mode overlay network [#25962](https://github.com/docker/docker/pull/25962) ++ Add support for host port PublishMode in services using the `--port` option in `docker service create` [#27917](https://github.com/docker/docker/pull/27917) ++ Add support for Windows server 2016 overlay network driver (requires upcoming ws2016 update) [#28182](https://github.com/docker/docker/pull/28182) +* Change the default `FORWARD` policy to `DROP` [#28257](https://github.com/docker/docker/pull/28257) ++ Add support for specifying static IP addresses for predefined network on windows [#22208](https://github.com/docker/docker/pull/22208) +- Fix `--publish` flag on `docker run` not working with IPv6 addresses [#27860](https://github.com/docker/docker/pull/27860) +- Fix inspect network show gateway with mask [#25564](https://github.com/docker/docker/pull/25564) +- Fix an issue where multiple addresses in a bridge may cause `--fixed-cidr` to not have the correct addresses [#26659](https://github.com/docker/docker/pull/26659) ++ Add creation timestamp to `docker network inspect` [#26130](https://github.com/docker/docker/pull/26130) +- Show peer nodes in `docker network inspect` for swarm overlay networks [#28078](https://github.com/docker/docker/pull/28078) +- Enable ping for service VIP address [#28019](https://github.com/docker/docker/pull/28019) + +### Plugins + +- Move plugins out of experimental [#28226](https://github.com/docker/docker/pull/28226) +- Add `--force` on `docker plugin remove` [#25096](https://github.com/docker/docker/pull/25096) +* Add support for dynamically reloading authorization plugins 
[#22770](https://github.com/docker/docker/pull/22770) ++ Add description in `docker plugin ls` [#25556](https://github.com/docker/docker/pull/25556) ++ Add `-f`/`--format` to `docker plugin inspect` [#25990](https://github.com/docker/docker/pull/25990) ++ Add `docker plugin create` command [#28164](https://github.com/docker/docker/pull/28164) +* Send request's TLS peer certificates to authorization plugins [#27383](https://github.com/docker/docker/pull/27383) +* Support for global-scoped network and ipam plugins in swarm-mode [#27287](https://github.com/docker/docker/pull/27287) + +### Remote API (v1.25) & Client + ++ Support `docker stack deploy` from a Compose file [#27998](https://github.com/docker/docker/pull/27998) ++ (experimental) Implement checkpoint and restore [#22049](https://github.com/docker/docker/pull/22049) ++ Add `--format` flag to `docker info` [#23808](https://github.com/docker/docker/pull/23808) +* Remove `--name` from `docker volume create` [#23830](https://github.com/docker/docker/pull/23830) ++ Add `docker stack ls` [#23886](https://github.com/docker/docker/pull/23886) ++ Add a new `is-task` ps filter [#24411](https://github.com/docker/docker/pull/24411) ++ Add `--env-file` flag to `docker create service` [#24844](https://github.com/docker/docker/pull/24844) ++ Add `--format` on `docker stats` [#24987](https://github.com/docker/docker/pull/24987) ++ Make `docker node ps` default to `self` in swarm node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `--group` in `docker service create` [#25317](https://github.com/docker/docker/pull/25317) ++ Add `--no-trunc` to service/node/stack ps output [#25337(https://github.com/docker/docker/pull/25337) ++ Add Logs to `ContainerAttachOptions` so go clients can request to retrieve container logs as part of the attach process [#26718](https://github.com/docker/docker/pull/26718) ++ Allow client to talk to an older server [#27745](https://github.com/docker/docker/pull/27745) +* Inform user client-side that a container removal is in progress [#26074](https://github.com/docker/docker/pull/26074) ++ Add `Isolation` to the /info endpoint [#26255](https://github.com/docker/docker/pull/26255) ++ Add `userns` to the /info endpoint [#27840](https://github.com/docker/docker/pull/27840) +- Do not allow more than one mode be requested at once in the services endpoint [#26643](https://github.com/docker/docker/pull/26643) ++ Add `--mount` flag to `docker create` and `docker run` [#26825](https://github.com/docker/docker/pull/26825)[#28150](https://github.com/docker/docker/pull/28150) ++ Add capability to /containers/create API to specify mounts in a more granular and safer way [#22373](https://github.com/docker/docker/pull/22373) ++ Add `--format` flag to `network ls` and `volume ls` [#23475](https://github.com/docker/docker/pull/23475) +* Allow the top-level `docker inspect` command to inspect any kind of resource [#23614](https://github.com/docker/docker/pull/23614) +- Allow unsetting the `--entrypoint` in `docker run` or `docker create` [#23718](https://github.com/docker/docker/pull/23718) +* Restructure CLI commands by adding `docker image` and `docker container` commands for more consistency [#26025](https://github.com/docker/docker/pull/26025) +- Remove `COMMAND` column from `service ls` output [#28029](https://github.com/docker/docker/pull/28029) ++ Add `--format` to 
`docker events` [#26268](https://github.com/docker/docker/pull/26268) +* Allow specifying multiple nodes on `docker node ps` [#26299](https://github.com/docker/docker/pull/26299) +* Restrict fractional digits to 2 decimals in `docker images` output [#26303](https://github.com/docker/docker/pull/26303) ++ Add `--dns-option` to `docker run` [#28186](https://github.com/docker/docker/pull/28186) ++ Add Image ID to container commit event [#28128](https://github.com/docker/docker/pull/28128) ++ Add external binaries version to docker info [#27955](https://github.com/docker/docker/pull/27955) ++ Add information for `Manager Addresses` in the output of `docker info` [#28042](https://github.com/docker/docker/pull/28042) ++ Add a new reference filter for `docker images` [#27872](https://github.com/docker/docker/pull/27872) + +### Runtime + ++ Add `--experimental` daemon flag to enable experimental features, instead of shipping them in a separate build [#27223](https://github.com/docker/docker/pull/27223) ++ Add a `--shutdown-timeout` daemon flag to specify the default timeout (in seconds) to stop containers gracefully before daemon exit [#23036](https://github.com/docker/docker/pull/23036) ++ Add `--stop-timeout` to specify the timeout value (in seconds) for individual containers to stop [#22566](https://github.com/docker/docker/pull/22566) ++ Add a new daemon flag `--userland-proxy-path` to allow configuring the userland proxy instead of using the hardcoded `docker-proxy` from `$PATH` [#26882](https://github.com/docker/docker/pull/26882) ++ Add boolean flag `--init` on `dockerd` and on `docker run` to use [tini](https://github.com/krallin/tini) a zombie-reaping init process as PID 1 [#26061](https://github.com/docker/docker/pull/26061) [#28037](https://github.com/docker/docker/pull/28037) ++ Add a new daemon flag `--init-path` to allow configuring the path to the `docker-init` binary [#26941](https://github.com/docker/docker/pull/26941) ++ Add support for live reloading insecure registry in configuration [#22337](https://github.com/docker/docker/pull/22337) ++ Add support for storage-opt size on Windows daemons [#23391](https://github.com/docker/docker/pull/23391) +* Improve reliability of `docker run --rm` by moving it from the client to the daemon [#20848](https://github.com/docker/docker/pull/20848) ++ Add support for `--cpu-rt-period` and `--cpu-rt-runtime` flags, allowing containers to run real-time threads when `CONFIG_RT_GROUP_SCHED` is enabled in the kernel [#23430](https://github.com/docker/docker/pull/23430) +* Allow parallel stop, pause, unpause [#24761](https://github.com/docker/docker/pull/24761) / [#26778](https://github.com/docker/docker/pull/26778) +* Implement XFS quota for overlay2 [#24771](https://github.com/docker/docker/pull/24771) +- Fix partial/full filter issue in `service tasks --filter` [#24850](https://github.com/docker/docker/pull/24850) +- Allow engine to run inside a user namespace [#25672](https://github.com/docker/docker/pull/25672) +- Fix a race condition between device deferred removal and resume device, when using the devicemapper graphdriver [#23497](https://github.com/docker/docker/pull/23497) +- Add `docker stats` support in Windows [#25737](https://github.com/docker/docker/pull/25737) +- Allow using `--pid=host` and `--net=host` when `--userns=host` [#25771](https://github.com/docker/docker/pull/25771) ++ (experimental) 
Add metrics output [#25820](https://github.com/docker/docker/pull/25820) +- Fix issue in `docker stats` with `NetworkDisabled=true` [#25905](https://github.com/docker/docker/pull/25905) ++ Add `docker top` support in Windows [#25891](https://github.com/docker/docker/pull/25891) ++ Record pid of exec'd process [#27470](https://github.com/docker/docker/pull/27470) ++ Add support for looking up user/groups via `getent` [#27599](https://github.com/docker/docker/pull/27599) ++ Add new `docker system` command with `df` and `prune` subcommands for system resource management, as well as `docker {container,image,volume,network} prune` subcommands [#26108](https://github.com/docker/docker/pull/26108) [#27525](https://github.com/docker/docker/pull/27525) / [#27525](https://github.com/docker/docker/pull/27525) +- Fix an issue where containers could not be stopped or killed by setting xfs max_retries to 0 upon ENOSPC with devicemapper [#26212](https://github.com/docker/docker/pull/26212) +- Fix `docker cp` failing to copy to a container's volume dir on CentOS with devicemapper [#28047](https://github.com/docker/docker/pull/28047) +* Promote overlay(2) graphdriver [#27932](https://github.com/docker/docker/pull/27932) ++ Add `--seccomp-profile` daemon flag to specify a path to a seccomp profile that overrides the default [#26276](https://github.com/docker/docker/pull/26276) +- Fix ulimits in `docker inspect` when `--default-ulimit` is set on daemon [#26405](https://github.com/docker/docker/pull/26405) +- Add workaround for overlay issues during build in older kernels [#28138](https://github.com/docker/docker/pull/28138) ++ Add `TERM` environment variable on `docker exec -t` [#26461](https://github.com/docker/docker/pull/26461) +* Honor a container’s `--stop-signal` setting upon `docker kill` [#26464](https://github.com/docker/docker/pull/26464) + +### Swarm Mode + ++ Add secret management [#27794](https://github.com/docker/docker/pull/27794) +* Display the endpoint mode in the output of `docker service inspect --pretty` [#26906](https://github.com/docker/docker/pull/26906) +* Make `docker service ps` output more bearable by shortening service IDs in task names [#28088](https://github.com/docker/docker/pull/28088) +* `docker node ps` now defaults to the current node [#25214](https://github.com/docker/docker/pull/25214) ++ Add `-a`/`--all` flags to `docker service ps` and `docker node ps` to show all results [#25983](https://github.com/docker/docker/pull/25983) ++ Add `--dns`, -`-dns-opt`, and `--dns-search` to service create. [#27567](https://github.com/docker/docker/pull/27567) ++ Add `--force` to `docker service update` [#27596](https://github.com/docker/docker/pull/27596) ++ Add `-q` to `docker service ps` [#27654](https://github.com/docker/docker/pull/27654) +* Display number of global services in `docker service ls` [#27710](https://github.com/docker/docker/pull/27710) +- Remove `--name` flag from `docker service update`. 
This flag is only functional on `docker service create`, so was removed from the `update` command [#26988](https://github.com/docker/docker/pull/26988) +- Fix worker nodes failing to recover because of transient networking issues [#26646](https://github.com/docker/docker/issues/26646) +* Add support for health aware load balancing and DNS records [#27279](https://github.com/docker/docker/pull/27279) +* Add `--hostname` to `docker service create` [#27857](https://github.com/docker/docker/pull/27857) +- Add `--tty` flag to `docker service create`/`update` [#28076](https://github.com/docker/docker/pull/28076) +* Autodetect, store, and expose node IP address as seen by the manager [#27910](https://github.com/docker/docker/pull/27910) +* Encryption at rest of manager keys and raft data [#27967](https://github.com/docker/docker/pull/27967) ++ Add `--update-max-failure-ratio`, `--update-monitor` and `--rollback` flags to `docker service update` [#26421](https://github.com/docker/docker/pull/26421) +- Fix an issue with address autodiscovery on `docker swarm init` running inside a container [#26457](https://github.com/docker/docker/pull/26457) ++ (experimental) Add `docker service logs` command to view logs for a service [#28089](https://github.com/docker/docker/pull/28089) +- Pin images by digest for `docker service create` and `update` [#28173](https://github.com/docker/docker/pull/28173) +- Add short (`-f`) flag for `docker node rm --force` and `docker swarm leave --force` [#28196](https://github.com/docker/docker/pull/28196) ++ Don't repull image if pinned by digest [#28265](https://github.com/docker/docker/pull/28265) ++ swarm-mode support for Windows [#27838](https://github.com/docker/docker/pull/27838) + +### Volume + ++ Add support for labels on volumes [#25628](https://github.com/docker/docker/pull/21567) ++ Add support for filtering volumes by label [#25628](https://github.com/docker/docker/pull/25628) +* Add a `--force` flag in `docker volume rm` to forcefully purge the data of the volume that has already been deleted [#23436](https://github.com/docker/docker/pull/23436) +* Enhance `docker volume inspect` to show all options used when creating the volume [#26671](https://github.com/docker/docker/pull/26671) +* Add support for local NFS volumes to resolve hostnames [#27329](https://github.com/docker/docker/pull/27329) + +### Security + +- Fix selinux labeling of volumes shared in a container [#23024](https://github.com/docker/docker/pull/23024) +- Prohibit `/sys/firmware/**` from being accessed with apparmor [#26618](https://github.com/docker/docker/pull/26618) + +### DEPRECATION + +- Marked the `docker daemon` command as deprecated. The daemon is moved to a separate binary (`dockerd`), and should be used instead [#26834](https://github.com/docker/docker/pull/26834) +- Deprecate unversioned API endpoints [#28208](https://github.com/docker/docker/pull/28208) +- Remove Ubuntu 15.10 (Wily Werewolf) as supported platform. Ubuntu 15.10 is EOL, and no longer receives updates [#27042](https://github.com/docker/docker/pull/27042) +- Remove Fedora 22 as supported platform.
Fedora 22 is EOL, and no longer receives updates [#27432](https://github.com/docker/docker/pull/27432) +- Deprecate the `repo:shortid` syntax on `docker pull` [#27207](https://github.com/docker/docker/pull/27207) +- Deprecate backing filesystem without d_type for overlay/overlay2 storage drivers [#27433](https://github.com/docker/docker/pull/27433) +- Deprecate MAINTAINER in Dockerfile [#25466](https://github.com/docker/docker/pull/25466) +- Deprecated filter param for endpoint `/images/json` [#27872](https://github.com/docker/docker/pull/27872) + +## 1.12.3 (2016-10-26) + +**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm +based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When +upgrading from an older version of docker, the upgrade process may not +automatically install the updated version of the unit file, or fail to start +the docker service if; + +- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or +- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive + +Starting the docker service will produce an error: + + Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. + +or + + no sockets found via socket activation: make sure the service was started by systemd. + +To resolve this: + +- Backup the current version of the unit file, and replace the file with the + [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm) +- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file if present +- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present). + +After making those changes, run `sudo systemctl daemon-reload`, and `sudo +systemctl restart docker` to reload changes and (re)start the docker daemon. 
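
(Editorial aside, not part of the vendored hunks: the `reference` helpers this patch pulls into `vendor/github.com/docker/distribution/reference` — `EnsureTagged`, `Match`, and `TrimNamed` — are easiest to read with a small usage sketch. The repository name below is hypothetical, and the behaviour assumed is only what the hunks above show: `EnsureTagged` applies the default `latest` tag when the reference is not already tagged, `Match` falls back from the full reference string to the bare name, and `TrimNamed` strips any tag or digest.)

```go
package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// WithName validates a bare repository name (hypothetical example).
	named, err := reference.WithName("docker.io/library/registry")
	if err != nil {
		panic(err)
	}

	// EnsureTagged applies the default "latest" tag when the reference
	// does not already carry one.
	tagged := reference.EnsureTagged(named)

	// Match uses path.Match-style patterns against the full reference and,
	// if that fails, against the bare name; TrimNamed drops the tag again.
	ok, err := reference.Match("docker.io/library/*:latest", tagged)
	fmt.Println(tagged.String(), ok, err, reference.TrimNamed(tagged).String())
}
```

These helpers arrive here only as a transitive vendoring update; the sketch is meant to show the shape of the new API surface, not code this repository is expected to call.
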
+ + +### Runtime + +- Fix ambient capability usage in containers (CVE-2016-8867) [#27610](https://github.com/docker/docker/pull/27610) +- Prevent a deadlock in libcontainerd for Windows [#27136](https://github.com/docker/docker/pull/27136) +- Fix error reporting in CopyFileWithTar [#27075](https://github.com/docker/docker/pull/27075) +* Reset health status to starting when a container is restarted [#27387](https://github.com/docker/docker/pull/27387) +* Properly handle shared mount propagation in storage directory [#27609](https://github.com/docker/docker/pull/27609) +- Fix docker exec [#27610](https://github.com/docker/docker/pull/27610) +- Fix backward compatibility with containerd’s events log [#27693](https://github.com/docker/docker/pull/27693) + +### Swarm Mode + +- Fix conversion of restart-policy [#27062](https://github.com/docker/docker/pull/27062) +* Update Swarmkit [#27554](https://github.com/docker/docker/pull/27554) + * Avoid restarting a task that has already been restarted [docker/swarmkit#1305](https://github.com/docker/swarmkit/pull/1305) + * Allow duplicate published ports when they use different protocols [docker/swarmkit#1632](https://github.com/docker/swarmkit/pull/1632) + * Allow multiple randomly assigned published ports on service [docker/swarmkit#1657](https://github.com/docker/swarmkit/pull/1657) + - Fix panic when allocations happen at init time [docker/swarmkit#1651](https://github.com/docker/swarmkit/pull/1651) + +### Networking + +* Update libnetwork [#27559](https://github.com/docker/docker/pull/27559) + - Fix race in serializing sandbox to string [docker/libnetwork#1495](https://github.com/docker/libnetwork/pull/1495) + - Fix race during deletion [docker/libnetwork#1503](https://github.com/docker/libnetwork/pull/1503) + * Reset endpoint port info on connectivity revoke in bridge driver [docker/libnetwork#1504](https://github.com/docker/libnetwork/pull/1504) + - Fix a deadlock in networking code [docker/libnetwork#1507](https://github.com/docker/libnetwork/pull/1507) + - Fix a race in load balancer state [docker/libnetwork#1512](https://github.com/docker/libnetwork/pull/1512) + +### Logging + +* Update fluent-logger-golang to v1.2.1 [#27474](https://github.com/docker/docker/pull/27474) + +### Contrib + +* Update buildtags for armhf ubuntu-trusty [#27327](https://github.com/docker/docker/pull/27327) +* Add AppArmor to runc buildtags for armhf [#27421](https://github.com/docker/docker/pull/27421) + ## 1.12.2 (2016-10-11) **IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm @@ -117,7 +376,7 @@ systemctl restart docker` to reload changes and (re)start the docker daemon. 
- Fix issue preventing `service update --publish-add` to work as intended [#25428](https://github.com/docker/docker/pull/25428) - Remove `service update --network-add` and `service update --network-rm` flags because this feature is not yet implemented in 1.12, but was inadvertently added - to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646) + to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646) ### Contrib @@ -1220,7 +1479,7 @@ by another client (#15489) #### Security - Fix tar breakout vulnerability * Extractions are now sandboxed chroot -- Security options are no longer comitted to images +- Security options are no longer committed to images #### Runtime - Fix deadlock in `docker ps -f exited=1` diff --git a/vendor/github.com/docker/docker/CONTRIBUTING.md b/vendor/github.com/docker/docker/CONTRIBUTING.md index 2a4767aea..eb5f8ab0e 100644 --- a/vendor/github.com/docker/docker/CONTRIBUTING.md +++ b/vendor/github.com/docker/docker/CONTRIBUTING.md @@ -123,7 +123,7 @@ However, there might be a way to implement that feature *on top of* Docker. group is for contributors and other people contributing to the Docker project. You can join them without a google account by sending an email to docker-dev+subscribe@googlegroups.com. - After receiving the join-request message, you can simply reply to that to confirm the subscribtion. + After receiving the join-request message, you can simply reply to that to confirm the subscription. diff --git a/vendor/github.com/docker/docker/Dockerfile b/vendor/github.com/docker/docker/Dockerfile index fbe87d188..9d344c813 100644 --- a/vendor/github.com/docker/docker/Dockerfile +++ b/vendor/github.com/docker/docker/Dockerfile @@ -47,6 +47,7 @@ RUN apt-get update && apt-get install -y \ btrfs-tools \ build-essential \ clang \ + cmake \ createrepo \ curl \ dpkg-sig \ @@ -74,6 +75,7 @@ RUN apt-get update && apt-get install -y \ python-websocket \ ubuntu-zfs \ xfsprogs \ + vim-common \ libzfs-dev \ tar \ zip \ @@ -125,19 +127,20 @@ RUN set -x \ # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. 
-ENV GO_VERSION 1.7.1 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \ +ENV GO_VERSION 1.7.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV GOPATH /go # Compile Go for cross compilation ENV DOCKER_CROSSPLATFORMS \ linux/386 linux/arm \ darwin/amd64 \ freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 + windows/amd64 windows/386 \ + solaris/amd64 # Dependency for golint ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 @@ -194,6 +197,15 @@ RUN git clone https://github.com/docker/docker-py.git /docker-py \ && git checkout -q $DOCKER_PY_COMMIT \ && pip install -r test-requirements.txt +# Install yamllint for validating swagger.yaml +RUN pip install yamllint==1.5.0 + +# Install go-swagger for validating swagger.yaml +ENV GO_SWAGGER_COMMIT c28258affb0b6251755d92489ef685af8d4ff3eb +RUN git clone https://github.com/go-swagger/go-swagger.git /go/src/github.com/go-swagger/go-swagger \ + && (cd /go/src/github.com/go-swagger/go-swagger && git checkout -q $GO_SWAGGER_COMMIT) \ + && go install -v github.com/go-swagger/go-swagger/cmd/swagger + # Set user.email so crosbymichael's in-container merge commits go smoothly RUN git config --global user.email 'docker-dummy@example.com' @@ -222,10 +234,11 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 # See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) -# Install tomlv, runc, containerd and grimes +# Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv runc containerd grimes +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/vendor/github.com/docker/docker/Dockerfile.aarch64 b/vendor/github.com/docker/docker/Dockerfile.aarch64 index a23388bc1..ce03d8db4 100644 --- a/vendor/github.com/docker/docker/Dockerfile.aarch64 +++ b/vendor/github.com/docker/docker/Dockerfile.aarch64 @@ -25,6 +25,7 @@ RUN apt-get update && apt-get install -y \ bash-completion \ btrfs-tools \ build-essential \ + cmake \ createrepo \ curl \ dpkg-sig \ @@ -50,13 +51,9 @@ RUN apt-get update && apt-get install -y \ gccgo \ iproute2 \ iputils-ping \ + vim-common \ --no-install-recommends -# Install armhf loader to use armv6 binaries on armv8 -RUN dpkg --add-architecture armhf \ - && apt-get update \ - && apt-get install -y libc6:armhf - # Get lvm2 source for compiling statically ENV LVM2_VERSION 2.02.103 RUN mkdir -p /usr/local/lvm2 \ @@ -100,13 +97,13 @@ RUN set -x \ # so we use gccgo as bootstrap to build Go from source code. # We don't use the official ARMv6 released binaries as a GOROOT_BOOTSTRAP, because # not all ARM64 platforms support 32-bit mode. 32-bit mode is optional for ARMv8. 
-ENV GO_VERSION 1.7.1 -RUN mkdir /usr/src/go && curl -fsSL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ +ENV GO_VERSION 1.7.3 +RUN mkdir /usr/src/go && curl -fsSL https://golang.org/dl/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \ && cd /usr/src/go/src \ && GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash ENV PATH /usr/src/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV GOPATH /go # Only install one version of the registry, because old version which support # schema1 manifests is not working on ARM64, we should skip integration-cli @@ -165,10 +162,11 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda # See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) -# Install tomlv, runc, containerd and grimes +# Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv runc containerd grimes +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/vendor/github.com/docker/docker/Dockerfile.armhf b/vendor/github.com/docker/docker/Dockerfile.armhf index a6ea923df..7f213f8f6 100644 --- a/vendor/github.com/docker/docker/Dockerfile.armhf +++ b/vendor/github.com/docker/docker/Dockerfile.armhf @@ -27,6 +27,7 @@ RUN apt-get update && apt-get install -y \ build-essential \ createrepo \ curl \ + cmake \ dpkg-sig \ git \ iptables \ @@ -46,6 +47,7 @@ RUN apt-get update && apt-get install -y \ python-websocket \ xfsprogs \ tar \ + vim-common \ --no-install-recommends \ && pip install awscli==1.10.15 @@ -66,11 +68,11 @@ RUN cd /usr/local/lvm2 \ # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL # Install Go -ENV GO_VERSION 1.7.1 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-armv6l.tar.gz" \ +ENV GO_VERSION 1.7.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-armv6l.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV GOPATH /go # We're building for armhf, which is ARMv7, so let's be explicit about that ENV GOARCH arm @@ -164,10 +166,11 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 # See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) -# Install tomlv, runc, containerd and grimes +# Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv runc containerd grimes +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy ENTRYPOINT ["hack/dind"] diff --git a/vendor/github.com/docker/docker/Dockerfile.ppc64le b/vendor/github.com/docker/docker/Dockerfile.ppc64le index 06b278235..3f550d180 100644 --- a/vendor/github.com/docker/docker/Dockerfile.ppc64le +++ b/vendor/github.com/docker/docker/Dockerfile.ppc64le @@ -26,6 +26,7 @@ RUN apt-get update && apt-get install -y \ bash-completion \ btrfs-tools \ build-essential \ + cmake \ createrepo \ curl \ dpkg-sig \ @@ -47,6 +48,7 @@ RUN apt-get update && apt-get install -y \ python-websocket \ xfsprogs \ tar \ + vim-common \ --no-install-recommends # Get lvm2 source for compiling statically @@ -92,8 +94,8 @@ RUN set -x \ # ppc64le doesn't have official go binaries, so use the version of go installed from the image # to build go from source. # NOTE: ppc64le has compatibility issues with older versions of go, so make sure the version >= 1.6 -ENV GO_VERSION 1.7.1 -ENV GO_DOWNLOAD_URL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz +ENV GO_VERSION 1.7.3 +ENV GO_DOWNLOAD_URL https://golang.org/dl/go${GO_VERSION}.src.tar.gz RUN set -x \ && TEMPDIR="$(mktemp -d)" \ @@ -108,7 +110,7 @@ RUN set -x \ ENV GOROOT_BOOTSTRAP /usr/local/go ENV PATH /usr/local/go/bin/:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV GOPATH /go # Dependency for golint ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 @@ -183,10 +185,11 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 # See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) -# Install tomlv, runc, containerd and grimes +# Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. 
+COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv runc containerd grimes +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/vendor/github.com/docker/docker/Dockerfile.s390x b/vendor/github.com/docker/docker/Dockerfile.s390x index 1a6e41d0d..6baa3a160 100644 --- a/vendor/github.com/docker/docker/Dockerfile.s390x +++ b/vendor/github.com/docker/docker/Dockerfile.s390x @@ -25,6 +25,7 @@ RUN apt-get update && apt-get install -y \ bash-completion \ btrfs-tools \ build-essential \ + cmake \ createrepo \ curl \ dpkg-sig \ @@ -46,6 +47,7 @@ RUN apt-get update && apt-get install -y \ python-websocket \ xfsprogs \ tar \ + vim-common \ --no-install-recommends # glibc in Debian has a bug specific to s390x that won't be fixed until Debian 8.6 is released @@ -95,12 +97,12 @@ RUN cd /usr/local/lvm2 \ && make install_device-mapper # See https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL -ENV GO_VERSION 1.7.1 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-s390x.tar.gz" \ +ENV GO_VERSION 1.7.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-s390x.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV GOPATH /go # Dependency for golint ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 @@ -175,10 +177,11 @@ RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 # See also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) -# Install tomlv, runc, containerd and grimes +# Install tomlv, vndr, runc, containerd, tini, docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh tomlv runc containerd grimes +RUN /tmp/install-binaries.sh tomlv vndr runc containerd tini proxy # Wrap all commands in the "docker-in-docker" script to allow nested containers ENTRYPOINT ["hack/dind"] diff --git a/vendor/github.com/docker/docker/Dockerfile.simple b/vendor/github.com/docker/docker/Dockerfile.simple index 908e8734a..4a15a381d 100644 --- a/vendor/github.com/docker/docker/Dockerfile.simple +++ b/vendor/github.com/docker/docker/Dockerfile.simple @@ -14,6 +14,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ btrfs-tools \ build-essential \ curl \ + cmake \ gcc \ git \ libapparmor-dev \ @@ -28,6 +29,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ xz-utils \ \ aufs-tools \ + vim-common \ && rm -rf /var/lib/apt/lists/* # Install seccomp: the version shipped in trusty is too old @@ -49,17 +51,18 @@ RUN set -x \ # IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines # will need updating, to avoid errors. Ping #docker-maintainers on IRC # with a heads-up. 
-ENV GO_VERSION 1.7.1 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \ +ENV GO_VERSION 1.7.3 +RUN curl -fsSL "https://golang.org/dl/go${GO_VERSION}.linux-amd64.tar.gz" \ | tar -xzC /usr/local ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor +ENV GOPATH /go ENV CGO_LDFLAGS -L/lib -# Install runc, containerd and grimes +# Install runc, containerd, tini and docker-proxy # Please edit hack/dockerfile/install-binaries.sh to update them. +COPY hack/dockerfile/binaries-commits /tmp/binaries-commits COPY hack/dockerfile/install-binaries.sh /tmp/install-binaries.sh -RUN /tmp/install-binaries.sh runc containerd grimes +RUN /tmp/install-binaries.sh runc containerd tini proxy ENV AUTO_GOPATH 1 WORKDIR /usr/src/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.solaris b/vendor/github.com/docker/docker/Dockerfile.solaris new file mode 100644 index 000000000..bb342e5e6 --- /dev/null +++ b/vendor/github.com/docker/docker/Dockerfile.solaris @@ -0,0 +1,20 @@ +# Defines an image that hosts a native Docker build environment for Solaris +# TODO: Improve stub + +FROM solaris:latest + +# compile and runtime deps +RUN pkg install --accept \ + git \ + gnu-coreutils \ + gnu-make \ + gnu-tar \ + diagnostic/top \ + golang \ + library/golang/* \ + developer/gcc-* + +ENV GOPATH /go/:/usr/lib/gocode/1.5/ +ENV DOCKER_CROSSPLATFORMS solaris/amd64 +WORKDIR /go/src/github.com/docker/docker +COPY . /go/src/github.com/docker/docker diff --git a/vendor/github.com/docker/docker/Dockerfile.windows b/vendor/github.com/docker/docker/Dockerfile.windows index 368c23678..75c03fc44 100644 --- a/vendor/github.com/docker/docker/Dockerfile.windows +++ b/vendor/github.com/docker/docker/Dockerfile.windows @@ -1,89 +1,182 @@ -# This file describes the standard way to build Docker, using a docker container on Windows -# Server 2016 +# escape=` + +# ----------------------------------------------------------------------------------------- +# This file describes the standard way to build Docker in a container on Windows +# Server 2016. +# +# Maintainer: @jhowardmsft +# ----------------------------------------------------------------------------------------- + + +# Prerequisites: +# -------------- +# +# 1. Windows Server 2016 with all Windows updates applied. Pre-release versions +# of Windows are not supported (eg Windows Server 2016 TP5). The build number +# must be at least 14393. This can be confirmed, for example, by running the +# following from an elevated PowerShell prompt - this sample output is from a +# fully up to date machine as at late October 2016: # +# >> PS C:\> $(gin).WindowsBuildLabEx +# >> 14393.321.amd64fre.rs1_release_inmarket.161004-2338 +# +# 2. Git for Windows (or another git client) must be installed. https://git-scm.com/download/win. +# +# 3. The machine must be configured to run containers. For example, by following +# the quick start guidance at https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start or +# https://github.com/docker/labs/blob/master/windows/windows-containers/Setup.md + + +# ----------------------------------------------------------------------------------------- + + # Usage: +# ----- +# +# The following steps should be run from an (elevated*) Windows PowerShell prompt. 
+# +# (*In a default installation of containers on Windows following the quick-start guidance at +# https://msdn.microsoft.com/en-us/virtualization/windowscontainers/quick_start/quick_start, +# the docker.exe client must run elevated to be able to connect to the daemon). +# +# 1. Clone the sources from github.com: +# +# >> git clone https://github.com/docker/docker.git C:\go\src\github.com\docker\docker +# >> Cloning into 'C:\go\src\github.com\docker\docker'... +# >> remote: Counting objects: 186216, done. +# >> remote: Compressing objects: 100% (21/21), done. +# >> remote: Total 186216 (delta 5), reused 0 (delta 0), pack-reused 186195 +# >> Receiving objects: 100% (186216/186216), 104.32 MiB | 8.18 MiB/s, done. +# >> Resolving deltas: 100% (123139/123139), done. +# >> Checking connectivity... done. +# >> Checking out files: 100% (3912/3912), done. +# >> PS C:\> +# +# +# 2. Change directory to the cloned docker sources: +# +# >> cd C:\go\src\github.com\docker\docker # -# # Assemble the full dev environment. This is slow the first time. Run this from -# # a directory containing the sources you are validating. For example from -# # c:\go\src\github.com\docker\docker # -# docker build -t docker -f Dockerfile.windows . +# 3. Build a docker image with the components required to build the docker binaries from source: # +# >> docker build -t nativebuildimage -f Dockerfile.windows . # -# # Build docker in a container. Run the following from a Windows cmd command prommpt, -# # replacing c:\built with the directory you want the binaries to be placed on the -# # host system. # -# docker run --rm -v "c:\built:c:\target" docker sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh binary; ec=$?; if [ $ec -eq 0 ]; then robocopy /c/go/src/github.com/docker/docker/bundles/$(cat VERSION)/binary /c/target/binary; fi; exit $ec' +# 4. Build the docker executable binaries in a container: # +# >> docker run --name binaries nativebuildimage sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh binary' +# +# +# 5. Copy the binaries out of the above container, replacing HostPath with an appropriate destination +# folder on the host system where you want the binaries to be located. +# +# >> $v=$(Get-Content ".\VERSION" -raw).ToString().Replace("`n","").Trim() +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\$v\binary-client\docker-$v.exe C:\HostPath\docker.exe +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\$v\binary-daemon\dockerd.exe C:\HostPath\dockerd.exe +# >> docker cp binaries:C:\go\src\github.com\docker\docker\bundles\$v\binary-daemon\docker-proxy-$v.exe C:\HostPath\docker-proxy.exe +# +# +# 6. (Optional) Remove the interim container holding the built executable binaries: +# +# >> docker rm binaries +# +# +# 7. (Optional) Remove the image used for the container in which the executable +# binaries are build. Tip - it may be useful to keep this image around if you need to +# build multiple times. Then you can take advantage of the builder cache to have an +# image which has all the components required to build the binaries already installed. +# +# >> docker rmi nativebuildimage + + +# ----------------------------------------------------------------------------------------- + + # Important notes: # --------------- # -# The posix utilities from GIT aren't usable interactively as at January 2016. This +# The posix utilities from git aren't usable interactively as at October 2016. 
This # is because they require a console window which isn't present in a container in Windows. -# See the example at the top of this file. Do NOT use -it in that docker run!!! +# See the example at the top of this file. Do NOT use -it in that docker run. It will not work. +# +# Don't attempt to use a volume for passing the source through to the container. The posix utilities will +# balk at reparse points. # -# Don't try to use a volume for passing the source through. The posix utilities will -# balk at reparse points. Again, see the example at the top of this file on how use a volume -# to get the built binary out of the container. +# The downloaded files are not cleared from the image. go.zip is used by the Windows +# CI servers to ensure the host and image are running consistent versions of go. # -# The steps are minimised dramatically to improve performance +# The GIT installer isn't very good at unattended install. We use techniques described +# at the links below to force it to set the path and other options accordingly. +# >> http://superuser.com/questions/944576/git-for-windows-silent-install-silent-arguments +# and follow through to installer at +# >> https://github.com/ferventcoder/chocolatey-packages/blob/master/automatic/git.install/tools/chocolateyInstall.ps1 +# +# As of October 2016, this does not work on Windows 10 client, just Windows Server 2016, +# and only with the default isolation mode (process). It does not work with isolation mode +# set to Hyper-V containers (hyperv). + +# ----------------------------------------------------------------------------------------- -FROM windowsservercore +# The number of build steps below are explicitly minimised to improve performance. +FROM microsoft/windowsservercore + +# Use PowerShell as the default shell +SHELL ["powershell", "-command"] # Environment variable notes: -# - GO_VERSION must consistent with 'Dockerfile' used by Linux'. +# - GO_VERSION must be consistent with 'Dockerfile' used by Linux. # - FROM_DOCKERFILE is used for detection of building within a container. 
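The numbered usage steps documented above (docker build of the nativebuildimage image, docker run to produce the binaries, docker cp to extract them, and the optional clean-up) can also be driven from a small helper program. The sketch below is illustrative only and is not part of the vendored Dockerfile; it assumes the Docker CLI is on PATH, reuses the nativebuildimage and binaries names from the comments above, uses a placeholder C:\HostPath destination, and copies the whole bundles directory rather than the individual versioned binaries.

package main

import (
	"log"
	"os"
	"os/exec"
)

// run shells out to the docker CLI and streams its output, mirroring one of
// the manual steps documented in the comments of Dockerfile.windows.
func run(args ...string) {
	cmd := exec.Command("docker", args...)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		log.Fatalf("docker %v: %v", args, err)
	}
}

func main() {
	// Step 3: build the image that contains the build toolchain.
	run("build", "-t", "nativebuildimage", "-f", "Dockerfile.windows", ".")
	// Step 4: build the binaries inside a container.
	run("run", "--name", "binaries", "nativebuildimage",
		"sh", "-c", "cd /c/go/src/github.com/docker/docker; hack/make.sh binary")
	// Step 5 (simplified): copy the whole bundles tree to a host folder.
	run("cp", `binaries:C:\go\src\github.com\docker\docker\bundles`, `C:\HostPath`)
	// Step 6 (optional): remove the interim container.
	run("rm", "binaries")
}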
-ENV GO_VERSION=1.7.1 \ - GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe \ - GOPATH=C:/go;C:/go/src/github.com/docker/docker/vendor \ +ENV GO_VERSION=1.7.3 ` + GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.10.1.windows.1/Git-2.10.1-64-bit.exe ` + GOPATH=C:\go ` FROM_DOCKERFILE=1 -WORKDIR c:/ - -# Everything downloaded/installed in one go (better performance, esp on TP4) -RUN \ - setx /M Path "c:\git\cmd;c:\git\bin;c:\git\usr\bin;%Path%;c:\gcc\bin;c:\go\bin" && \ - setx GOROOT "c:\go" && \ - powershell -command \ - $ErrorActionPreference = 'Stop'; \ - Function Download-File([string] $source, [string] $target) { \ - $wc = New-Object net.webclient; $wc.Downloadfile($source, $target) \ - } \ - \ - Write-Host INFO: Downloading git...; \ - Download-File %GIT_LOCATION% gitsetup.exe; \ - \ - Write-Host INFO: Downloading go...; \ - Download-File https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.msi go.msi; \ - \ - Write-Host INFO: Downloading compiler 1 of 3...; \ - Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip gcc.zip; \ - \ - Write-Host INFO: Downloading compiler 2 of 3...; \ - Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip runtime.zip; \ - \ - Write-Host INFO: Downloading compiler 3 of 3...; \ - Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip binutils.zip; \ - \ - Write-Host INFO: Installing git...; \ - Start-Process gitsetup.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=c:\git\' -Wait; \ - \ - Write-Host INFO: Installing go..."; \ - Start-Process msiexec -ArgumentList '-i go.msi -quiet' -Wait; \ - \ - Write-Host INFO: Unzipping compiler...; \ - c:\git\usr\bin\unzip.exe -q -o gcc.zip -d /c/gcc; \ - c:\git\usr\bin\unzip.exe -q -o runtime.zip -d /c/gcc; \ - c:\git\usr\bin\unzip.exe -q -o binutils.zip -d /c/gcc"; \ - \ - Write-Host INFO: Removing interim files; \ - Remove-Item *.zip; \ - Remove-Item go.msi; \ - Remove-Item gitsetup.exe; \ - \ +WORKDIR C:\ + +RUN ` + setx /M Path $($Env:PATH+';C:\gcc\bin;C:\go\bin'); ` + ` + $ErrorActionPreference = 'Stop'; ` + Function Download-File([string] $source, [string] $target) { ` + $wc = New-Object net.webclient; $wc.Downloadfile($source, $target) ` + } ` + ` + Write-Host INFO: Downloading git...; ` + Download-File $Env:GIT_LOCATION gitsetup.exe; ` + ` + Write-Host INFO: Downloading go...; ` + Download-File $('https://golang.org/dl/go'+$Env:GO_VERSION+'.windows-amd64.zip') go.zip; ` + ` + Write-Host INFO: Downloading compiler 1 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip gcc.zip; ` + ` + Write-Host INFO: Downloading compiler 2 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip runtime.zip; ` + ` + Write-Host INFO: Downloading compiler 3 of 3...; ` + Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip binutils.zip; ` + ` + Write-Host INFO: Installing git...; ` + $installPath = 'HKLM:\SOFTWARE\Microsoft\Windows\CurrentVersion\Uninstall'; ` + $installItem = 'Git_is1'; ` + New-Item -Path $installPath -Name $installItem -Force; ` + $installKey = $installPath+'\'+$installItem; ` + New-ItemProperty $installKey -Name 'Inno Setup CodeFile: Path Option' -Value 'CmdTools' -PropertyType 'String' -Force; ` + New-ItemProperty $installKey 
-Name 'Inno Setup CodeFile: Bash Terminal Option' -Value 'ConHost' -PropertyType 'String' -Force; ` + New-ItemProperty $installKey -Name 'Inno Setup CodeFile: CRLF Option' -Value 'CRLFCommitAsIs' -PropertyType 'String' -Force; ` + Start-Process gitsetup.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=C:\git\' -Wait; ` + ` + Write-Host INFO: Expanding go...; ` + Expand-Archive C:\go.zip -DestinationPath C:\; ` + ` + Write-Host INFO: Expanding compiler...; ` + Expand-Archive C:\gcc.zip -DestinationPath C:\gcc -Force; ` + Expand-Archive C:\runtime.zip -DestinationPath C:\gcc -Force; ` + Expand-Archive C:\binutils.zip -DestinationPath C:\gcc -Force; ` + ` Write-Host INFO: Completed # Prepare for building -COPY . /go/src/github.com/docker/docker +COPY . C:\go\src\github.com\docker\docker diff --git a/vendor/github.com/docker/docker/MAINTAINERS b/vendor/github.com/docker/docker/MAINTAINERS index 57b845f3f..39bb8c130 100644 --- a/vendor/github.com/docker/docker/MAINTAINERS +++ b/vendor/github.com/docker/docker/MAINTAINERS @@ -27,6 +27,7 @@ people = [ "aaronlehmann", + "akihirosuda", "aluzzardi", "anusha", "coolljt0725", @@ -77,6 +78,8 @@ # - close an issue or pull request when it's inappropriate or off-topic people = [ + "aboch", + "andrewhsu", "ehazlett", "mgoelzer", "programmerq", @@ -167,11 +170,26 @@ Email = "aaron.lehmann@docker.com" GitHub = "aaronlehmann" + [people.aboch] + Name = "Alessandro Boch" + Email = "aboch@docker.com" + GitHub = "aboch" + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "suda.akihiro@lab.ntt.co.jp" + GitHub = "AkihiroSuda" + [people.aluzzardi] Name = "Andrea Luzzardi" Email = "al@docker.com" GitHub = "aluzzardi" + [people.andrewhsu] + Name = "Andrew Hsu" + Email = "andrewhsu@docker.com" + GitHub = "andrewhsu" + [people.anusha] Name = "Anusha Ragunathan" Email = "anusha@docker.com" diff --git a/vendor/github.com/docker/docker/Makefile b/vendor/github.com/docker/docker/Makefile index e84f7bcd9..31d1fa44c 100644 --- a/vendor/github.com/docker/docker/Makefile +++ b/vendor/github.com/docker/docker/Makefile @@ -50,7 +50,7 @@ DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v /go/src/github.com/docke # enable .go-pkg-cache if DOCKER_INCREMENTAL_BINARY and DOCKER_MOUNT (i.e.DOCKER_HOST) are set PKGCACHE_DIR := $(if $(PKGCACHE_DIR),$(PKGCACHE_DIR),.go-pkg-cache) -PKGCACHE_MAP := gopath:/go/pkg vendor:/go/src/github.com/docker/docker/vendor/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo +PKGCACHE_MAP := gopath:/go/pkg goroot-linux_amd64_netgo:/usr/local/go/pkg/linux_amd64_netgo DOCKER_MOUNT := $(if $(DOCKER_INCREMENTAL_BINARY),$(DOCKER_MOUNT) $(shell echo $(PKGCACHE_MAP) | sed -E 's@([^ ]*)@-v "$(CURDIR)/$(PKGCACHE_DIR)/\1"@g'),$(DOCKER_MOUNT)) GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) @@ -75,7 +75,7 @@ DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" default: binary all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives - $(DOCKER_RUN_DOCKER) hack/make.sh + $(DOCKER_RUN_DOCKER) bash -c 'hack/validate/default && hack/make.sh' binary: build ## build the linux binaries $(DOCKER_RUN_DOCKER) hack/make.sh binary @@ -133,7 +133,15 @@ tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containin $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, 
tests, tomls, go vet and vendor - $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-default-seccomp validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet validate-vendor + $(DOCKER_RUN_DOCKER) hack/validate/all win: build ## cross build the binary for windows $(DOCKER_RUN_DOCKER) hack/make.sh win + +.PHONY: swagger-gen +swagger-gen: + docker run --rm -v $(PWD):/go/src/github.com/docker/docker \ + -w /go/src/github.com/docker/docker \ + --entrypoint hack/generate-swagger-api.sh \ + -e GOPATH=/go \ + quay.io/goswagger/swagger diff --git a/vendor/github.com/docker/docker/VERSION b/vendor/github.com/docker/docker/VERSION index a4ab692a5..2f2e08cfa 100644 --- a/vendor/github.com/docker/docker/VERSION +++ b/vendor/github.com/docker/docker/VERSION @@ -1 +1 @@ -1.13.0-dev +1.14.0-dev diff --git a/vendor/github.com/docker/docker/api/swagger-gen.yaml b/vendor/github.com/docker/docker/api/swagger-gen.yaml new file mode 100644 index 000000000..f07a02737 --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger-gen.yaml @@ -0,0 +1,12 @@ + +layout: + models: + - name: definition + source: asset:model + target: "{{ joinFilePath .Target .ModelPackage }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" + operations: + - name: handler + source: asset:serverOperation + target: "{{ joinFilePath .Target .APIPackage .Package }}" + file_name: "{{ (snakize (pascalize .Name)) }}.go" diff --git a/vendor/github.com/docker/docker/api/swagger.yaml b/vendor/github.com/docker/docker/api/swagger.yaml new file mode 100644 index 000000000..45c53189c --- /dev/null +++ b/vendor/github.com/docker/docker/api/swagger.yaml @@ -0,0 +1,7240 @@ +swagger: "2.0" +schemes: + - "http" + - "https" +produces: + - "application/json" + - "text/plain" +consumes: + - "application/json" + - "text/plain" +basePath: "/v1.25" +info: + title: "Docker Remote API" + version: "1.25" + x-logo: + url: "https://docs.docker.com/images/logo-docker-main.png" + description: | + The Docker API is an HTTP REST API served by Docker Engine. It is the API the Docker client uses to communicate with the Engine, so everything the Docker client can do can be done with the API. + + Most of the client's commands map directly to API endpoints (e.g. `docker ps` is `GET /containers/json`). The notable exception is running containers, which consists of several API calls. [There is example of using `curl` to run a container in the SDK documentation.](#TODO) + + # Errors + + The Remote API uses standard HTTP status codes to indicate the success or failure of the API call. The body of the response will be JSON in the following format: + + ``` + { + "message": "page not found" + } + ``` + + # Versioning + + The API is usually changed in each release of Docker. If you want to write a client that doesn't break when connecting to newer Docker releases, you can lock to a specific API version. + + For Docker 1.13, the API version is 1.25. To lock to this version, you prefix the URL with `/v1.25`. For example, calling `/info` is the same as calling `/v1.25/info`. + + The API uses an open schema model, which means server may add extra properties to responses. Likewise, the server will ignore any extra query parameters and request body properties. When you write clients, you need to ignore additional properties in responses to ensure they do not break when talking to newer Docker daemons. + + # Authentication + + Authentication for registries is handled client side. 
The client has to send authentication details to various endpoints that need to communicate with registries, such as `POST /images/(name)/push`. These are sent as `X-Registry-Auth` header as a Base64 encoded (JSON) string with the following structure: + + ``` + { + "username": "string", + "password": "string", + "email": "string", + "serveraddress": "string" + } + ``` + + The `serveraddress` is a domain/IP without a protocol. Throughout this structure, double quotes are required. + + If you have already got an identity token from the [`/auth` endpoint](#operation/checkAuthentication), you can just pass this instead of credentials: + + ``` + { + "identitytoken": "9cbaf023786cd7..." + } + ``` + +definitions: + Port: + type: "object" + description: "An open port on a container" + required: [PrivatePort, Type] + properties: + IP: + type: "string" + format: "ip-address" + PrivatePort: + type: "integer" + format: "uint16" + x-nullable: false + description: "Port on the container" + PublicPort: + type: "integer" + format: "uint16" + description: "Port exposed on the host" + Type: + type: "string" + x-nullable: false + enum: ["tcp", "udp"] + example: + PrivatePort: 8080 + PublicPort: 80 + Type: "tcp" + + MountPoint: + type: "object" + description: "A mount point inside a container" + properties: + Type: + type: "string" + Name: + type: "string" + Source: + type: "string" + Destination: + type: "string" + Driver: + type: "string" + Mode: + type: "string" + RW: + type: "boolean" + Propagation: + type: "string" + + DeviceMapping: + type: "object" + description: "A device mapping between the host and container" + properties: + PathOnHost: + type: "string" + PathInContainer: + type: "string" + CgroupPermissions: + type: "string" + example: + PathOnHost: "/dev/deviceName" + PathInContainer: "/dev/deviceName" + CgroupPermissions: "mrw" + + ThrottleDevice: + type: "object" + properties: + Path: + description: "Device path" + type: "string" + Rate: + description: "Rate" + type: "integer" + format: "int64" + minimum: 0 + + Mount: + type: "object" + properties: + Target: + description: "Container path." + type: "string" + Source: + description: "Mount source (e.g. a volume name, a host path)." + Type: + description: | + The mount type. Available types: + + - `bind` Mounts a file or directory from the host into the container. Must exist prior to creating the container. + - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed. + - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs. + type: "string" + enum: + - "bind" + - "volume" + - "tmpfs" + ReadOnly: + description: "Whether the mount should be read-only." + type: "boolean" + BindOptions: + description: "Optional configuration for the `bind` type." + type: "object" + properties: + Propagation: + description: "A propagation mode with the value `[r]private`, `[r]shared`, or `[r]slave`." + enum: + - "private" + - "rprivate" + - "shared" + - "rshared" + - "slave" + - "rslave" + VolumeOptions: + description: "Optional configuration for the `volume` type." + type: "object" + properties: + NoCopy: + description: "Populate volume with data from the target." + type: "boolean" + default: false + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + DriverConfig: + description: "Map of driver specific options" + type: "object" + properties: + Name: + description: "Name of the driver to use to create the volume." + type: "string" + Options: + description: "key/value map of driver specific options." + type: "object" + additionalProperties: + type: "string" + TmpfsOptions: + description: "Optional configuration for the `tmpfs` type." + type: "object" + properties: + SizeBytes: + description: "The size for the tmpfs mount in bytes." + type: "integer" + format: "int64" + Mode: + description: "The permission mode for the tmpfs mount in an integer." + type: "integer" + RestartPolicy: + description: | + The behavior to apply when the container exits. The default is not to restart. + + An ever increasing delay (double the previous delay, starting at 100ms) is added before each restart to prevent flooding the server. + type: "object" + properties: + Name: + type: "string" + description: | + - `always` Always restart + - `unless-stopped` Restart always except when the user has manually stopped the container + - `on-failure` Restart only when the container exit code is non-zero + enum: + - "always" + - "unless-stopped" + - "on-failure" + MaximumRetryCount: + type: "integer" + description: "If `on-failure` is used, the number of times to retry before giving up" + default: {} + + Resources: + description: "A container's resources (cgroups config, ulimits, etc)" + type: "object" + properties: + # Applicable to all platforms + CpuShares: + description: "An integer value representing this container's relative CPU weight versus other containers." + type: "integer" + Memory: + description: "Memory limit in bytes." + type: "integer" + default: 0 + # Applicable to UNIX platforms + CgroupParent: + description: "Path to `cgroups` under which the container's `cgroup` is created. If the path is not absolute, the path is considered to be relative to the `cgroups` path of the init process. Cgroups are created if they do not already exist." + type: "string" + BlkioWeight: + description: "Block IO weight (relative weight)." + type: "integer" + minimum: 0 + maximum: 1000 + BlkioWeightDevice: + description: | + Block IO weight (relative device weight) in the form `[{"Path": "device_path", "Weight": weight}]`. + type: "array" + items: + type: "object" + properties: + Path: + type: "string" + Weight: + type: "integer" + minimum: 0 + BlkioDeviceReadBps: + description: | + Limit read rate (bytes per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteBps: + description: | + Limit write rate (bytes per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceReadIOps: + description: | + Limit read rate (IO per second) from a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + BlkioDeviceWriteIOps: + description: | + Limit write rate (IO per second) to a device, in the form `[{"Path": "device_path", "Rate": rate}]`. + type: "array" + items: + $ref: "#/definitions/ThrottleDevice" + CpuPeriod: + description: "The length of a CPU period in microseconds." + type: "integer" + format: "int64" + CpuQuota: + description: "Microseconds of CPU time that the container can get in a CPU period." 
+ type: "integer" + format: "int64" + CpuRealtimePeriod: + description: "The length of a CPU real-time period in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpuRealtimeRuntime: + description: "The length of a CPU real-time runtime in microseconds. Set to 0 to allocate no time allocated to real-time tasks." + type: "integer" + format: "int64" + CpusetCpus: + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)" + type: "string" + CpusetMems: + description: "Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems." + type: "string" + Devices: + description: "A list of devices to add to the container." + type: "array" + items: + $ref: "#/definitions/DeviceMapping" + DiskQuota: + description: "Disk limit (in bytes)." + type: "integer" + format: "int64" + KernelMemory: + description: "Kernel memory limit in bytes." + type: "integer" + format: "int64" + MemoryReservation: + description: "Memory soft limit in bytes." + type: "integer" + format: "int64" + MemorySwap: + description: "Total memory limit (memory + swap). Set as `-1` to enable unlimited swap." + type: "integer" + format: "int64" + MemorySwappiness: + description: "Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100." + type: "integer" + format: "int64" + minimum: 0 + maximum: 100 + NanoCPUs: + description: "CPU quota in units of 10-9 CPUs." + type: "integer" + format: "int64" + OomKillDisable: + description: "Disable OOM Killer for the container." + type: "boolean" + PidsLimit: + description: "Tune a container's pids limit. Set -1 for unlimited." + type: "integer" + format: "int64" + Ulimits: + description: | + A list of resource limits to set in the container. For example: `{"Name": "nofile", "Soft": 1024, "Hard": 2048}`" + type: "array" + items: + type: "object" + properties: + Name: + description: "Name of ulimit" + type: "string" + Soft: + description: "Soft limit" + type: "integer" + Hard: + description: "Hard limit" + type: "integer" + # Applicable to Windows + CpuCount: + description: | + The number of usable CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + CpuPercent: + description: | + The usable percentage of the available CPUs (Windows only). + + On Windows Server containers, the processor resource controls are mutually exclusive. The order of precedence is `CPUCount` first, then `CPUShares`, and `CPUPercent` last. + type: "integer" + format: "int64" + IOMaximumIOps: + description: "Maximum IOps for the container system drive (Windows only)" + type: "integer" + format: "int64" + IOMaximumBandwidth: + description: "Maximum IO in bytes per second for the container system drive (Windows only)" + type: "integer" + format: "int64" + + HostConfig: + description: "Container configuration that depends on the host we are running on" + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + # Applicable to all platforms + Binds: + type: "array" + description: | + A list of volume bindings for this container. Each volume binding is a string in one of these forms: + + - `host-src:container-dest` to bind-mount a host path into the container. Both `host-src`, and `container-dest` must be an _absolute_ path. 
+ - `host-src:container-dest:ro` to make the bind-mount read-only inside the container. Both `host-src`, and `container-dest` must be an _absolute_ path. + - `volume-name:container-dest` to bind-mount a volume managed by a volume driver into the container. `container-dest` must be an _absolute_ path. + - `volume-name:container-dest:ro` to mount the volume read-only inside the container. `container-dest` must be an _absolute_ path. + items: + type: "string" + ContainerIDFile: + type: "string" + description: "Path to a file where the container ID is written" + LogConfig: + type: "object" + description: "The logging configuration for this container" + properties: + Type: + type: "string" + enum: + - "json-file" + - "syslog" + - "journald" + - "gelf" + - "fluentd" + - "awslogs" + - "splunk" + - "etwlogs" + - "none" + Config: + type: "object" + additionalProperties: + type: "string" + NetworkMode: + type: "string" + description: "Network mode to use for this container. Supported standard values are: `bridge`, `host`, `none`, and `container:`. Any other value is taken + as a custom network's name to which this container should connect to." + PortBindings: + type: "object" + description: "A map of exposed container ports and the host port they should map to." + additionalProperties: + type: "object" + properties: + HostIp: + type: "string" + description: "The host IP address" + HostPort: + type: "string" + description: "The host port number, as a string" + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + AutoRemove: + type: "boolean" + description: "Automatically remove the container when the container's process exits. This has no effect if `RestartPolicy` is set." + VolumeDriver: + type: "string" + description: "Driver that this container uses to mount volumes." + VolumesFrom: + type: "array" + description: "A list of volumes to inherit from another container, specified in the form `[:]`." + items: + type: "string" + Mounts: + description: "Specification for mounts to be added to the container." + type: "array" + items: + $ref: "#/definitions/Mount" + + # Applicable to UNIX platforms + CapAdd: + type: "array" + description: "A list of kernel capabilities to add to the container." + items: + type: "string" + CapDrop: + type: "array" + description: "A list of kernel capabilities to drop from the container." + items: + type: "string" + Dns: + type: "array" + description: "A list of DNS servers for the container to use." + items: + type: "string" + DnsOptions: + type: "array" + description: "A list of DNS options." + items: + type: "string" + DnsSearch: + type: "array" + description: "A list of DNS search domains." + items: + type: "string" + ExtraHosts: + type: "array" + description: | + A list of hostnames/IP mappings to add to the container's `/etc/hosts` file. Specified in the form `["hostname:IP"]`. + items: + type: "string" + GroupAdd: + type: "array" + description: "A list of additional groups that the container process will run as." + items: + type: "string" + IpcMode: + type: "string" + description: "IPC namespace to use for the container." + Cgroup: + type: "string" + description: "Cgroup to use for the container." + Links: + type: "array" + description: "A list of links for the container in the form `container_name:alias`." + items: + type: "string" + OomScoreAdj: + type: "integer" + description: "An integer value containing the score given to the container in order to tune OOM killer preferences." 
+ PidMode: + type: "string" + description: | + Set the PID (Process) Namespace mode for the container. It can be either: + + - `"container:"`: joins another container's PID namespace + - `"host"`: use the host's PID namespace inside the container + Privileged: + type: "boolean" + description: "Gives the container full access to the host." + PublishAllPorts: + type: "boolean" + description: "Allocates a random host port for all of a container's exposed ports." + ReadonlyRootfs: + type: "boolean" + description: "Mount the container's root filesystem as read only." + SecurityOpt: + type: "array" + description: "A list of string values to customize labels for MLS + systems, such as SELinux." + items: + type: "string" + StorageOpt: + type: "object" + description: | + Storage driver options for this container, in the form `{"size": "120G"}`. + additionalProperties: + type: "string" + Tmpfs: + type: "object" + description: "List of tmpfs mounts for this container." + additionalProperties: + type: "string" + UTSMode: + type: "string" + description: "UTS namespace to use for the container." + UsernsMode: + type: "string" + description: "Sets the usernamespace mode for the container when usernamespace remapping option is enabled." + ShmSize: + type: "integer" + description: "Size of `/dev/shm` in bytes. If omitted, the system uses 64MB." + minimum: 0 + Sysctls: + type: "object" + description: | + A list of kernel parameters (sysctls) to set in the container. For example: `{"net.ipv4.ip_forward": "1"}` + additionalProperties: + type: "string" + Runtime: + type: "string" + description: "Runtime to use with this container." + # Applicable to Windows + ConsoleSize: + type: "array" + description: "Initial console size, as an `[height, width]` array. (Windows only)" + minItems: 2 + maxItems: 2 + items: + type: "integer" + minimum: 0 + Isolation: + type: "string" + description: "Isolation technology of the container. (Windows only)" + enum: + - "default" + - "process" + - "hyperv" + + Config: + description: "Configuration for a container that is portable between hosts" + type: "object" + properties: + Hostname: + description: "The hostname to use for the container, as a valid RFC 1123 hostname." + type: "string" + Domainname: + description: "The domain name to use for the container." + type: "string" + User: + description: "The user that commands are run as inside the container." + type: "string" + AttachStdin: + description: "Whether to attach to `stdin`." + type: "boolean" + default: false + AttachStdout: + description: "Whether to attach to `stdout`." + type: "boolean" + default: true + AttachStderr: + description: "Whether to attach to `stderr`." + type: "boolean" + default: true + ExposedPorts: + description: | + An object mapping ports to an empty object in the form: + + `{"/": {}}` + type: "object" + additionalProperties: + type: "object" + enum: + - {} + default: {} + Tty: + description: "Attach standard streams to a TTY, including `stdin` if it is not closed." + type: "boolean" + default: false + OpenStdin: + description: "Open `stdin`" + type: "boolean" + default: false + StdinOnce: + description: "Close `stdin` after one attached client disconnects" + type: "boolean" + default: false + Env: + description: | + A list of environment variables to set inside the container in the form `["VAR=value", ...]` + type: "array" + items: + type: "string" + Cmd: + description: "Command to run specified as a string or an array of strings." 
+ type: + - "array" + - "string" + items: + type: "string" + Healthcheck: + description: "A test to perform to check that the container is healthy." + type: "object" + properties: + Test: + description: | + The test to perform. Possible values are: + + - `{}` inherit healthcheck from image or parent image + - `{"NONE"}` disable healthcheck + - `{"CMD", args...}` exec arguments directly + - `{"CMD-SHELL", command}` run command with system's default shell + type: "array" + items: + type: "string" + Interval: + description: "The time to wait between checks in nanoseconds. 0 means inherit." + type: "integer" + Timeout: + description: "The time to wait before considering the check to have hung. 0 means inherit." + type: "integer" + Retries: + description: "The number of consecutive failures needed to consider a container as unhealthy. 0 means inherit." + type: "integer" + ArgsEscaped: + description: "Command is already escaped (Windows only)" + type: "boolean" + Image: + description: "The name of the image to use when creating the container" + type: "string" + Volumes: + description: "An object mapping mount point paths inside the container to empty objects." + type: "object" + properties: + additionalProperties: + type: "object" + enum: + - {} + default: {} + WorkingDir: + description: "The working directory for commands to run in." + type: "string" + Entrypoint: + description: | + The entry point for the container as a string or an array of strings. + + If the array consists of exactly one empty string (`[""]`) then the entry point is reset to system default (i.e., the entry point used by docker when there is no `ENTRYPOINT` instruction in the `Dockerfile`). + type: + - "array" + - "string" + items: + type: "string" + NetworkDisabled: + description: "Disable networking for the container." + type: "boolean" + MacAddress: + description: "MAC address of the container." + type: "string" + OnBuild: + description: "`ONBUILD` metadata that were defined in the image's `Dockerfile`." + type: "array" + items: + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + StopSignal: + description: "Signal to stop a container as a string or unsigned integer." + type: "string" + default: "SIGTERM" + StopTimeout: + description: "Timeout to stop a container in seconds." + type: "integer" + default: 10 + Shell: + description: "Shell for when `RUN`, `CMD`, and `ENTRYPOINT` uses a shell." + type: "array" + items: + type: "string" + + NetworkConfig: + description: "TODO: check is correct" + type: "object" + properties: + Bridge: + type: "string" + Gateway: + type: "string" + Address: + type: "string" + IPPrefixLen: + type: "integer" + MacAddress: + type: "string" + PortMapping: + type: "string" + Ports: + type: "array" + items: + $ref: "#/definitions/Port" + + GraphDriver: + description: "Information about this container's graph driver." 
+ type: "object" + properties: + Name: + type: "string" + Data: + type: "object" + additionalProperties: + type: "string" + + Image: + type: "object" + properties: + Id: + type: "string" + RepoTags: + type: "array" + items: + type: "string" + RepoDigests: + type: "array" + items: + type: "string" + Parent: + type: "string" + Comment: + type: "string" + Created: + type: "string" + Container: + type: "string" + ContainerConfig: + $ref: "#/definitions/Config" + DockerVersion: + type: "string" + Author: + type: "string" + Config: + $ref: "#/definitions/Config" + Architecture: + type: "string" + Os: + type: "string" + Size: + type: "integer" + format: "int64" + VirtualSize: + type: "integer" + format: "int64" + GraphDriver: + $ref: "#/definitions/GraphDriver" + RootFS: + type: "object" + properties: + Type: + type: "string" + Layers: + type: "array" + items: + type: "string" + BaseLayer: + type: "string" + + ImageSummary: + type: "object" + required: + - Id + - ParentId + - RepoTags + - RepoDigests + - Created + - Size + - SharedSize + - VirtualSize + - Labels + - Containers + properties: + Id: + type: "string" + x-nullable: false + ParentId: + type: "string" + x-nullable: false + RepoTags: + type: "array" + x-nullable: false + items: + type: "string" + RepoDigests: + type: "array" + x-nullable: false + items: + type: "string" + Created: + type: "integer" + x-nullable: false + Size: + type: "integer" + x-nullable: false + SharedSize: + type: "integer" + x-nullable: false + VirtualSize: + type: "integer" + x-nullable: false + Labels: + type: "object" + x-nullable: false + additionalProperties: + type: "string" + Containers: + x-nullable: false + type: "integer" + + AuthConfig: + type: "object" + properties: + username: + type: "string" + password: + type: "string" + email: + type: "string" + serveraddress: + type: "string" + example: + username: "hannibal" + password: "xxxx" + serveraddress: "https://index.docker.io/v1/" + + ProcessConfig: + type: "object" + properties: + privileged: + type: "boolean" + user: + type: "string" + tty: + type: "boolean" + entrypoint: + type: "string" + arguments: + type: "array" + items: + type: "string" + + Volume: + type: "object" + required: [Name, Driver, Mountpoint, Labels, Scope, Options] + properties: + Name: + type: "string" + description: "Name of the volume." + x-nullable: false + Driver: + type: "string" + description: "Name of the volume driver used by the volume." + x-nullable: false + Mountpoint: + type: "string" + description: "Mount path of the volume on the host." + x-nullable: false + Status: + type: "object" + description: | + Low-level details about the volume, provided by the volume driver. + Details are returned as a map with key/value pairs: + `{"key":"value","key2":"value2"}`. + + The `Status` field is optional, and is omitted if the volume driver + does not support this feature. + additionalProperties: + type: "object" + Labels: + type: "object" + description: "User-defined key/value metadata." + x-nullable: false + additionalProperties: + type: "string" + Scope: + type: "string" + description: "The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level." + default: "local" + x-nullable: false + enum: ["local", "global"] + Options: + type: "object" + description: "The driver specific options used when creating the volume." 
+ additionalProperties: + type: "string" + UsageData: + type: "object" + required: [Size, RefCount] + properties: + Size: + type: "integer" + description: "The disk space used by the volume (local driver only)" + default: -1 + x-nullable: false + RefCount: + type: "integer" + default: -1 + description: "The number of containers referencing this volume." + x-nullable: false + + example: + Name: "tardis" + Driver: "custom" + Mountpoint: "/var/lib/docker/volumes/tardis" + Status: + hello: "world" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + + Network: + type: "object" + properties: + Name: + type: "string" + Id: + type: "string" + Created: + type: "string" + format: "dateTime" + Scope: + type: "string" + Driver: + type: "string" + EnableIPv6: + type: "boolean" + IPAM: + $ref: "#/definitions/IPAM" + Internal: + type: "boolean" + Containers: + type: "object" + additionalProperties: + $ref: "#/definitions/NetworkContainer" + Options: + type: "object" + additionalProperties: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + example: + Name: "net01" + Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99" + Created: "2016-10-19T04:33:30.360899459Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + IPAM: + Driver: "default" + Config: + - Subnet: "172.19.0.0/16" + Gateway: "172.19.0.1" + Options: + foo: "bar" + Internal: false + Containers: + 19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c: + Name: "test" + EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a" + MacAddress: "02:42:ac:13:00:02" + IPv4Address: "172.19.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + IPAM: + type: "object" + properties: + Driver: + description: "Name of the IPAM driver to use." + type: "string" + default: "default" + Config: + description: "List of IPAM configuration options, specified as a map: `{\"Subnet\": , \"IPRange\": , \"Gateway\": , \"AuxAddress\": }`" + type: "array" + items: + type: "object" + additionalProperties: + type: "string" + Options: + description: "Driver-specific options, specified as a map." 
+ type: "array" + items: + type: "object" + additionalProperties: + type: "string" + NetworkContainer: + type: "object" + properties: + EndpointID: + type: "string" + MacAddress: + type: "string" + IPv4Address: + type: "string" + IPv6Address: + type: "string" + + BuildInfo: + type: "object" + properties: + id: + type: "string" + stream: + type: "string" + error: + type: "string" + errorDetail: + $ref: "#/definitions/ErrorDetail" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + CreateImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + + PushImageInfo: + type: "object" + properties: + error: + type: "string" + status: + type: "string" + progress: + type: "string" + progressDetail: + $ref: "#/definitions/ProgressDetail" + ErrorDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "string" + ProgressDetail: + type: "object" + properties: + code: + type: "integer" + message: + type: "integer" + + ErrorResponse: + description: "Represents an error." + type: "object" + required: ["message"] + properties: + message: + description: "The error message." + type: "string" + x-nullable: false + example: + message: "Something went wrong." + + IdResponse: + description: "Response to an API call that returns just an Id" + type: "object" + required: ["Id"] + properties: + Id: + description: "The id of the newly created object." + type: "string" + x-nullable: false + + EndpointSettings: + description: "Configuration for a network endpoint." + type: "object" + properties: + IPAMConfig: + description: "IPAM configurations for the endpoint" + type: "object" + properties: + IPv4Address: + type: "string" + IPv6Address: + type: "string" + LinkLocalIPs: + type: "array" + items: + type: "string" + Links: + type: "array" + items: + type: "string" + Aliases: + type: "array" + items: + type: "string" + NetworkID: + type: "string" + EndpointID: + type: "string" + Gateway: + type: "string" + IPAddress: + type: "string" + IPPrefixLen: + type: "integer" + IPv6Gateway: + type: "string" + GlobalIPv6Address: + type: "string" + GlobalIPv6PrefixLen: + type: "integer" + format: "int64" + MacAddress: + type: "string" + + PluginMount: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Source, Destination, Type, Options] + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Source: + type: "string" + Destination: + type: "string" + x-nullable: false + Type: + type: "string" + x-nullable: false + Options: + type: "array" + items: + type: "string" + + PluginDevice: + type: "object" + required: [Name, Description, Settable, Path] + x-nullable: false + properties: + Name: + type: "string" + x-nullable: false + Description: + type: "string" + x-nullable: false + Settable: + type: "array" + items: + type: "string" + Path: + type: "string" + + PluginEnv: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "string" + + PluginInterfaceType: + type: "object" + x-nullable: false + required: [Prefix, Capability, Version] + properties: + Prefix: + type: "string" + 
x-nullable: false + Capability: + type: "string" + x-nullable: false + Version: + type: "string" + x-nullable: false + + Plugin: + description: "A plugin for the Remote API" + type: "object" + required: [Settings, Enabled, Config, Name, Tag] + properties: + Id: + type: "string" + Name: + type: "string" + x-nullable: false + Tag: + type: "string" + x-nullable: false + Enabled: + description: "True when the plugin is running. False when the plugin is not running, only installed." + type: "boolean" + x-nullable: false + Settings: + description: "Settings that can be modified by users." + type: "object" + x-nullable: false + required: [Args, Devices, Env, Mounts] + properties: + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Env: + type: "array" + items: + type: "string" + Args: + type: "array" + items: + type: "string" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + Config: + description: "The config of a plugin." + type: "object" + x-nullable: false + required: + - Description + - Documentation + - Interface + - Entrypoint + - Workdir + - Network + - Capabilities + - Mounts + - Devices + - Env + - Args + properties: + Description: + type: "string" + x-nullable: false + Documentation: + type: "string" + x-nullable: false + Interface: + description: "The interface between Docker and the plugin" + x-nullable: false + type: "object" + required: [Types, Socket] + properties: + Types: + type: "array" + items: + $ref: "#/definitions/PluginInterfaceType" + Socket: + type: "string" + x-nullable: false + Entrypoint: + type: "array" + items: + type: "string" + Workdir: + type: "string" + x-nullable: false + User: + type: "object" + x-nullable: false + properties: + UID: + type: "integer" + format: "uint32" + GID: + type: "integer" + format: "uint32" + Network: + type: "object" + x-nullable: false + required: [Type] + properties: + Type: + x-nullable: false + type: "string" + Capabilities: + type: "array" + items: + type: "string" + Mounts: + type: "array" + items: + $ref: "#/definitions/PluginMount" + Devices: + type: "array" + items: + $ref: "#/definitions/PluginDevice" + Env: + type: "array" + items: + $ref: "#/definitions/PluginEnv" + Args: + type: "object" + x-nullable: false + required: [Name, Description, Settable, Value] + properties: + Name: + x-nullable: false + type: "string" + Description: + x-nullable: false + type: "string" + Settable: + type: "array" + items: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/no-remove" + Tag: "latest" + Active: true + Config: + Mounts: + - Name: "" + Description: "" + Settable: null + Source: "/data" + Destination: "/data" + Type: "bind" + Options: + - "shared" + - "rbind" + - Name: "" + Description: "" + Settable: null + Source: null + Destination: "/foobar" + Type: "tmpfs" + Options: null + Env: + - "DEBUG=1" + Args: null + Devices: null + Manifest: + ManifestVersion: "v0" + Description: "A test plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "plugin-no-remove" + - "/data" + Workdir: "" + User: {} + Network: + Type: "host" + Capabilities: null + Mounts: + - Name: "" + Description: "" + Settable: null + Source: "/data" + Destination: "/data" + Type: "bind" + Options: + - "shared" + - "rbind" + - Name: "" + Description: "" + Settable: null + Source: null + 
Destination: "/foobar" + Type: "tmpfs" + Options: null + Devices: + - Name: "device" + Description: "a host device to mount" + Settable: null + Path: "/dev/cpu_dma_latency" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "1" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + + NodeSpec: + type: "object" + properties: + Name: + description: "Name for the node." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Role: + description: "Role of the node." + type: "string" + enum: + - "worker" + - "manager" + Availability: + description: "Availability of the node." + type: "string" + enum: + - "active" + - "pause" + - "drain" + example: + Availability: "active" + Name: "node-name" + Role: "manager" + Labels: + foo: "bar" + Node: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/NodeSpec" + Description: + type: "object" + properties: + Hostname: + type: "string" + Platform: + type: "object" + properties: + Architecture: + type: "string" + OS: + type: "string" + Resources: + type: "object" + properties: + NanoCPUs: + type: "integer" + format: "int64" + MemoryBytes: + type: "integer" + format: "int64" + Engine: + type: "object" + properties: + EngineVersion: + type: "string" + Labels: + type: "object" + additionalProperties: + type: "string" + Plugins: + type: "array" + items: + type: "object" + properties: + Type: + type: "string" + Name: + type: "string" + example: + ID: "24ifsmvkjbyhk" + Version: + Index: 8 + CreatedAt: "2016-06-07T20:31:11.853781916Z" + UpdatedAt: "2016-06-07T20:31:11.999868824Z" + Spec: + Name: "my-node" + Role: "manager" + Availability: "active" + Labels: + foo: "bar" + Description: + Hostname: "bf3067039e47" + Platform: + Architecture: "x86_64" + OS: "linux" + Resources: + NanoCPUs: 4000000000 + MemoryBytes: 8272408576 + Engine: + EngineVersion: "1.13.0" + Labels: + foo: "bar" + Plugins: + - Type: "Volume" + Name: "local" + - Type: "Network" + Name: "bridge" + - Type: "Network" + Name: "null" + - Type: "Network" + Name: "overlay" + Status: + State: "ready" + Addr: "172.17.0.2" + ManagerStatus: + Leader: true + Reachability: "reachable" + Addr: "172.17.0.2:2377" + SwarmSpec: + description: "User modifiable swarm configuration." + type: "object" + properties: + Name: + description: "Name of the swarm." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Orchestration: + description: "Orchestration configuration." + type: "object" + properties: + TaskHistoryRetentionLimit: + description: "The number of historic tasks to keep per instance or node. If negative, never remove completed or failed tasks." + type: "integer" + format: "int64" + Raft: + description: "Raft configuration." + type: "object" + properties: + SnapshotInterval: + description: "The number of log entries between snapshots." + type: "integer" + format: "int64" + KeepOldSnapshots: + description: "The number of snapshots to keep beyond the current snapshot." + type: "integer" + format: "int64" + LogEntriesForSlowFollowers: + description: "The number of log entries to keep around to sync up slow followers after a snapshot is created." 
+ type: "integer" + format: "int64" + ElectionTick: + description: | + The number of ticks that a follower will wait for a message from the leader before becoming a candidate and starting an election. `ElectionTick` must be greater than `HeartbeatTick`. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + HeartbeatTick: + description: | + The number of ticks between heartbeats. Every HeartbeatTick ticks, the leader will send a heartbeat to the followers. + + A tick currently defaults to one second, so these translate directly to seconds currently, but this is NOT guaranteed. + type: "integer" + Dispatcher: + description: "Dispatcher configuration." + type: "object" + properties: + HeartbeatPeriod: + description: "The delay for an agent to send a heartbeat to the dispatcher." + type: "integer" + format: "int64" + CAConfig: + description: "CA configuration." + type: "object" + properties: + NodeCertExpiry: + description: "The duration node certificates are issued for." + type: "integer" + format: "int64" + ExternalCAs: + description: "Configuration for forwarding signing requests to an external certificate authority." + type: "array" + items: + type: "object" + properties: + Protocol: + description: "Protocol for communication with the external CA (currently only `cfssl` is supported)." + type: "string" + enum: + - "cfssl" + default: "cfssl" + URL: + description: "URL where certificate signing requests should be sent." + type: "string" + Options: + description: "An object with key/value pairs that are interpreted as protocol-specific options for the external CA driver." + type: "object" + additionalProperties: + type: "string" + TaskDefaults: + description: "Defaults for creating tasks in this cluster." + type: "object" + properties: + LogDriver: + description: | + The log driver to use for tasks created in the orchestrator if unspecified by a service. + + Updating this value will only have an affect on new tasks. Old tasks will continue use their previously configured log driver until recreated. + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + example: + Name: "default" + Orchestration: + TaskHistoryRetentionLimit: 10 + Raft: + SnapshotInterval: 10000 + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + ElectionTick: 3 + Dispatcher: + HeartbeatPeriod: 5000000000 + CAConfig: + NodeCertExpiry: 7776000000000000 + JoinTokens: + Worker: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-1awxwuwd3z9j1z3puu7rcgdbx" + Manager: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + # The Swarm information for `GET /info`. It is the same as `GET /swarm`, but + # without `JoinTokens`. + ClusterInfo: + type: "object" + properties: + ID: + description: "The ID of the swarm." + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/SwarmSpec" + TaskSpec: + description: "User modifiable task configuration." + type: "object" + properties: + ContainerSpec: + type: "object" + properties: + Image: + description: "The image name to use for the container." + type: "string" + Command: + description: "The command to be run in the image." 
+ type: "array" + items: + type: "string" + Args: + description: "Arguments to the command." + type: "array" + items: + type: "string" + Env: + description: "A list of environment variables in the form `VAR=value`." + type: "array" + items: + type: "string" + Dir: + description: "The working directory for commands to run in." + type: "string" + User: + description: "The user inside the container." + type: "string" + Labels: + description: "User-defined key/value data." + type: "object" + additionalProperties: + type: "string" + TTY: + description: "Whether a pseudo-TTY should be allocated." + type: "boolean" + Mounts: + description: "Specification for mounts to be added to containers created as part of the service." + type: "array" + items: + $ref: "#/definitions/Mount" + StopGracePeriod: + description: "Amount of time to wait for the container to terminate before forcefully killing it." + type: "integer" + format: "int64" + Resources: + description: "Resource requirements which apply to each individual container created as part of the service." + type: "object" + properties: + Limits: + description: "Define resources limits." + type: "object" + properties: + NanoCPUs: + description: "CPU limit in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory limit in Bytes." + type: "integer" + format: "int64" + Reservation: + description: "Define resources reservation." + properties: + NanoCPUs: + description: "CPU reservation in units of 10-9 CPU shares." + type: "integer" + format: "int64" + MemoryBytes: + description: "Memory reservation in Bytes." + type: "integer" + format: "int64" + RestartPolicy: + description: "Specification for the restart policy which applies to containers created as part of this service." + type: "object" + properties: + Condition: + description: "Condition for restart." + type: "string" + enum: + - "none" + - "on-failure" + - "any" + Delay: + description: "Delay between restart attempts." + type: "integer" + format: "int64" + MaxAttempts: + description: "Maximum attempts to restart a given container before giving up (default value is 0, which is ignored)." + type: "integer" + format: "int64" + default: 0 + Window: + description: "Windows is the time window used to evaluate the restart policy (default value is 0, which is unbounded)." + type: "integer" + format: "int64" + default: 0 + Placement: + type: "object" + properties: + Constraints: + description: "An array of constraints." + type: "array" + items: + type: "string" + ForceUpdate: + description: "A counter that triggers an update even if no relevant parameters have been changed." + type: "integer" + Networks: + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + LogDriver: + description: "Specifies the log driver to use for tasks created from this spec. If not present, the default one for the swarm will be used, finally falling back to the engine default if not specified." + type: "object" + properties: + Name: + type: "string" + Options: + type: "object" + additionalProperties: + type: "string" + TaskState: + type: "string" + enum: + - "new" + - "allocated" + - "pending" + - "assigned" + - "accepted" + - "preparing" + - "ready" + - "starting" + - "running" + - "complete" + - "shutdown" + - "failed" + - "rejected" + Task: + type: "object" + properties: + ID: + description: "The ID of the task." 
+ type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Name: + description: "Name of the task." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + Spec: + $ref: "#/definitions/TaskSpec" + ServiceID: + description: "The ID of the service this task is part of." + type: "string" + Slot: + type: "integer" + NodeID: + description: "The ID of the node that this task is on." + type: "string" + Status: + type: "object" + properties: + Timestamp: + type: "string" + format: "dateTime" + State: + $ref: "#/definitions/TaskState" + Message: + type: "string" + Err: + type: "string" + ContainerStatus: + type: "object" + properties: + ContainerID: + type: "string" + PID: + type: "integer" + ExitCode: + type: "integer" + DesiredState: + $ref: "#/definitions/TaskState" + example: + ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + ServiceSpec: + description: "User modifiable configuration for a service." + properties: + Name: + description: "Name of the service." + type: "string" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + TaskTemplate: + $ref: "#/definitions/TaskSpec" + Mode: + description: "Scheduling mode for the service." + type: "object" + properties: + Replicated: + type: "object" + properties: + Replicas: + type: "integer" + format: "int64" + Global: + type: "object" + UpdateConfig: + description: "Specification for the update strategy of the service." + type: "object" + properties: + Parallelism: + description: "Maximum number of tasks to be updated in one iteration (0 means unlimited parallelism)." + type: "integer" + format: "int64" + Delay: + description: "Amount of time between updates, in nanoseconds." + type: "integer" + format: "int64" + FailureAction: + description: "Action to take if an updated task fails to run, or stops running during the update." + type: "string" + enum: + - "continue" + - "pause" + Monitor: + description: "Amount of time to monitor each updated task for failures, in nanoseconds." 
+ type: "integer" + format: "int64" + MaxFailureRatio: + description: "The fraction of tasks that may fail during an update before the failure action is invoked, specified as a floating point number between 0 and 1." + type: "number" + default: 0 + Networks: + description: "Array of network names or IDs to attach the service to." + type: "array" + items: + type: "object" + properties: + Target: + type: "string" + Aliases: + type: "array" + items: + type: "string" + EndpointSpec: + $ref: "#/definitions/EndpointSpec" + EndpointPortConfig: + type: "object" + properties: + Name: + type: "string" + Protocol: + type: "string" + enum: + - "tcp" + - "udp" + TargetPort: + description: "The port inside the container." + type: "integer" + PublishedPort: + description: "The port on the swarm hosts." + type: "integer" + EndpointSpec: + description: "Properties that can be configured to access and load balance a service." + type: "object" + properties: + Mode: + description: "The mode of resolution to use for internal load balancing + between tasks." + type: "string" + enum: + - "vip" + - "dnsrr" + default: "vip" + Ports: + description: "List of exposed ports that this service is accessible on from the outside. Ports can only be provided if `vip` resolution mode is used." + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + Service: + type: "object" + properties: + ID: + type: "string" + Version: + type: "object" + properties: + Index: + type: "integer" + format: "int64" + CreatedAt: + type: "string" + format: "dateTime" + UpdatedAt: + type: "string" + format: "dateTime" + Spec: + $ref: "#/definitions/ServiceSpec" + Endpoint: + type: "object" + properties: + Spec: + $ref: "#/definitions/EndpointSpec" + Ports: + type: "array" + items: + $ref: "#/definitions/EndpointPortConfig" + VirtualIPs: + type: "array" + items: + type: "object" + properties: + NetworkID: + type: "string" + Addr: + type: "string" + UpdateStatus: + description: "The status of a service update." 
+ type: "object" + properties: + State: + type: "string" + enum: + - "updating" + - "paused" + - "completed" + StartedAt: + type: "string" + format: "dateTime" + CompletedAt: + type: "string" + format: "dateTime" + Message: + type: "string" + example: + ID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Version: + Index: 19 + CreatedAt: "2016-06-07T21:05:51.880065305Z" + UpdatedAt: "2016-06-07T21:07:29.962229872Z" + Spec: + Name: "hopeful_cori" + TaskTemplate: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + FailureAction: "pause" + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Endpoint: + Spec: + Mode: "vip" + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + Ports: + - + Protocol: "tcp" + TargetPort: 6379 + PublishedPort: 30001 + VirtualIPs: + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.2/16" + - + NetworkID: "4qvuz4ko70xaltuqbt8956gd1" + Addr: "10.255.0.3/16" + ImageDeleteResponse: + type: "object" + properties: + Untagged: + description: "The image ID of an image that was untagged" + type: "string" + Deleted: + description: "The image ID of an image that was deleted" + type: "string" + ContainerSummary: + type: "array" + items: + type: "object" + properties: + Id: + description: "The ID of this container" + type: "string" + x-go-name: "ID" + Names: + description: "The names that this container has been given" + type: "array" + items: + type: "string" + Image: + description: "The name of the image used when creating this container" + type: "string" + ImageID: + description: "The ID of the image that this container was created from" + type: "string" + Command: + description: "Command to run when starting the container" + type: "string" + Created: + description: "When the container was created" + type: "integer" + format: "int64" + Ports: + description: "The ports exposed by this container" + type: "array" + items: + $ref: "#/definitions/Port" + SizeRw: + description: "The size of files that have been created or changed by this container" + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container" + type: "integer" + format: "int64" + Labels: + description: "User-defined key/value metadata." + type: "object" + additionalProperties: + type: "string" + State: + description: "The state of this container (e.g. `Exited`)" + type: "string" + Status: + description: "Additional human-readable status of this container (e.g. `Exit 0`)" + type: "string" + HostConfig: + type: "object" + properties: + NetworkMode: + type: "string" + NetworkSettings: + description: "A summary of the container's network settings" + type: "object" + properties: + Networks: + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + Mounts: + type: "array" + items: + $ref: "#/definitions/Mount" + +paths: + /containers/json: + get: + summary: "List containers" + operationId: "GetContainerList" + produces: + - "application/json" + parameters: + - name: "all" + in: "query" + description: "Return all containers. By default, only running containers are shown" + type: "boolean" + default: false + - name: "limit" + in: "query" + description: "Return this number of most recently created containers, including non-running ones." 
+ type: "integer" + - name: "size" + in: "query" + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: | + Filters to process on the container list, encoded as JSON (a `map[string][]string`). For example, `{"status": ["paused"]}` will only return paused containers. + + Available filters: + - `exited=` containers with exit code of `` + - `status=`(`created`|`restarting`|`running`|`removing`|`paused`|`exited`|`dead`) + - `label=key` or `label="key=value"` of a container label + - `isolation=`(`default`|`process`|`hyperv`) (Windows daemon only) + - `id=` a container's ID + - `name=` a container's name + - `is-task=`(`true`|`false`) + - `ancestor`=(`[:]`, ``, or ``) + - `before`=(`` or ``) + - `since`=(`` or ``) + - `volume`=(`` or ``) + - `network`=(`` or ``) + - `health`=(`starting`|`healthy`|`unhealthy`|`none`) + type: "string" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/ContainerSummary" + examples: + application/json: + - Id: "8dfafdbc3a40" + Names: + - "/boring_feynman" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 1" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: + - PrivatePort: 2222 + PublicPort: 3333 + Type: "tcp" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "2cdc4edb1ded3631c81f57966563e5c8525b81121bb3706a9a9a3ae102711f3f" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:02" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" + Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + - Id: "9cd87474be90" + Names: + - "/coolName" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 222222" + Created: 1367854155 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "88eaed7b37b38c2a3f0c4bc796494fdf51b270c2d22656412a2ca5d559a64d7a" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.8" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:08" + Mounts: [] + - Id: "3176a2479c92" + Names: + - "/sleepy_dog" + Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 3333333333333333" + Created: 1367854154 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "8b27c041c30326d59cd6e6f510d4f8d1d570a228466f956edf7815508f78e30d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.6" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:06" + Mounts: [] + - Id: "4cb07b47f9fb" + Names: + - "/running_cat" 
+ Image: "ubuntu:latest" + ImageID: "d74508fb6632491cea586a1fd7d748dfc5274cd6fdfedee309ecdcbc2bf5cb82" + Command: "echo 444444444444444444444444444444444" + Created: 1367854152 + State: "Exited" + Status: "Exit 0" + Ports: [] + Labels: {} + SizeRw: 12288 + SizeRootFs: 0 + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "d91c7b2f0644403d7ef3095985ea0e2370325cd2332ff3a3225c4247328e66e9" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.5" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:11:00:05" + Mounts: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Container" + /containers/create: + post: + summary: "Create a container" + operationId: "ContainerCreate" + consumes: + - "application/json" + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "name" + in: "query" + description: "Assign the specified name to the container. Must match `/?[a-zA-Z0-9_-]+`." + type: "string" + pattern: "/?[a-zA-Z0-9_-]+" + - name: "body" + in: "body" + description: "Container to create" + schema: + allOf: + - $ref: "#/definitions/Config" + - type: "object" + properties: + HostConfig: + $ref: "#/definitions/HostConfig" + NetworkingConfig: + description: "This container's networking configuration." + type: "object" + properties: + EndpointsConfig: + description: "A mapping of network name to endpoint configuration for that network." + type: "object" + additionalProperties: + $ref: "#/definitions/EndpointSettings" + example: + Hostname: "" + Domainname: "" + User: "" + AttachStdin: false + AttachStdout: true + AttachStderr: true + Tty: false + OpenStdin: false + StdinOnce: false + Env: + - "FOO=bar" + - "BAZ=quux" + Cmd: + - "date" + Entrypoint: "" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + Volumes: + /volumes/data: {} + WorkingDir: "" + NetworkDisabled: false + MacAddress: "12:34:56:78:9a:bc" + ExposedPorts: + 22/tcp: {} + StopSignal: "SIGTERM" + StopTimeout: 10 + HostConfig: + Binds: + - "/tmp:/tmp" + Links: + - "redis3:redis" + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + NanoCPUs: 500000 + CpuPercent: 80 + CpuShares: 512 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpuQuota: 50000 + CpusetCpus: "0,1" + CpusetMems: "0,1" + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 300 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceWriteIOps: + - {} + MemorySwappiness: 60 + OomKillDisable: false + OomScoreAdj: 500 + PidMode: "" + PidsLimit: -1 + PortBindings: + 22/tcp: + - HostPort: "11022" + PublishAllPorts: false + Privileged: false + ReadonlyRootfs: false + Dns: + - "8.8.8.8" + DnsOptions: + - "" + DnsSearch: + - "" + VolumesFrom: + - "parent" + - "other:ro" + CapAdd: + - "NET_ADMIN" + CapDrop: + - "MKNOD" + GroupAdd: + - "newgroup" + RestartPolicy: + Name: "" + MaximumRetryCount: 0 + AutoRemove: true + NetworkMode: "bridge" + Devices: [] + Ulimits: + - {} + LogConfig: + Type: "json-file" + Config: {} + SecurityOpt: [] + StorageOpt: {} + CgroupParent: "" + VolumeDriver: "" + ShmSize: 67108864 + NetworkingConfig: + EndpointsConfig: + isolated_nw: + IPAMConfig: 
+ IPv4Address: "172.20.30.33" + IPv6Address: "2001:db8:abcd::3033" + LinkLocalIPs: + - "169.254.34.68" + - "fe80::3468" + Links: + - "container_1" + - "container_2" + Aliases: + - "server_x" + - "server_y" + + required: true + responses: + 201: + description: "Container created successfully" + schema: + type: "object" + required: [Id, Warnings] + properties: + Id: + description: "The ID of the created container" + type: "string" + x-nullable: false + Warnings: + description: "Warnings encountered when creating the container" + type: "array" + x-nullable: false + items: + type: "string" + examples: + application/json: + Id: "e90e34656806" + Warnings: [] + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 406: + description: "impossible to attach" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Container" + /containers/{id}/json: + get: + summary: "Inspect a container" + description: "Return low-level information about a container." + operationId: "GetContainerInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Id: + description: "The ID of the container" + type: "string" + Created: + description: "The time the container was created" + type: "string" + Path: + description: "The path to the command being run" + type: "string" + Args: + description: "The arguments to the command being run" + type: "array" + items: + type: "string" + State: + description: "The state of the container." + type: "object" + properties: + Status: + description: "The status of the container. For example, `running` or `exited`." + type: "string" + Running: + description: "Whether this container is running." + type: "boolean" + Paused: + description: "Whether this container is paused." + type: "boolean" + Restarting: + description: "Whether this container is restarting." + type: "boolean" + OOMKilled: + description: "Whether this container has been killed because it ran out of memory." + type: "boolean" + Dead: + type: "boolean" + Pid: + description: "The process ID of this container" + type: "integer" + ExitCode: + description: "The last exit code of this container" + type: "integer" + Error: + type: "string" + StartedAt: + description: "The time when this container was last started." + type: "string" + FinishedAt: + description: "The time when this container last exited." + type: "string" + Image: + description: "The container's image" + type: "string" + ResolvConfPath: + type: "string" + HostnamePath: + type: "string" + HostsPath: + type: "string" + LogPath: + type: "string" + Node: + description: "TODO" + type: "object" + Name: + type: "string" + RestartCount: + type: "integer" + Driver: + type: "string" + MountLabel: + type: "string" + ProcessLabel: + type: "string" + AppArmorProfile: + type: "string" + ExecIDs: + type: "string" + HostConfig: + $ref: "#/definitions/HostConfig" + GraphDriver: + $ref: "#/definitions/GraphDriver" + SizeRw: + description: "The size of files that have been created or changed by this container." + type: "integer" + format: "int64" + SizeRootFs: + description: "The total size of all the files in this container." 
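Relating back to the `/containers/create` endpoint documented above, a hedged Go sketch that posts a minimal `Config` body and decodes the `Id`/`Warnings` response; the daemon address and container name are illustrative, only the field names come from the spec:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Minimal container configuration.
	body, _ := json.Marshal(map[string]interface{}{
		"Image": "ubuntu",
		"Cmd":   []string{"date"},
	})

	resp, err := http.Post(
		"http://localhost:2375/containers/create?name=example",
		"application/json",
		bytes.NewReader(body),
	)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	// 201 responses carry the new container ID and any warnings.
	var created struct {
		Id       string
		Warnings []string
	}
	if err := json.NewDecoder(resp.Body).Decode(&created); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("created:", created.Id, "warnings:", created.Warnings)
}
```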
+ type: "integer" + format: "int64" + Mounts: + type: "array" + items: + $ref: "#/definitions/MountPoint" + Config: + $ref: "#/definitions/Config" + NetworkSettings: + $ref: "#/definitions/NetworkConfig" + examples: + application/json: + AppArmorProfile: "" + Args: + - "-c" + - "exit 9" + Config: + AttachStderr: true + AttachStdin: false + AttachStdout: true + Cmd: + - "/bin/sh" + - "-c" + - "exit 9" + Domainname: "" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Hostname: "ba033ac44011" + Image: "ubuntu" + Labels: + com.example.vendor: "Acme" + com.example.license: "GPL" + com.example.version: "1.0" + MacAddress: "" + NetworkDisabled: false + OpenStdin: false + StdinOnce: false + Tty: false + User: "" + Volumes: + /volumes/data: {} + WorkingDir: "" + StopSignal: "SIGTERM" + StopTimeout: 10 + Created: "2015-01-06T15:47:31.485331387Z" + Driver: "devicemapper" + HostConfig: + MaximumIOps: 0 + MaximumIOBps: 0 + BlkioWeight: 0 + BlkioWeightDevice: + - {} + BlkioDeviceReadBps: + - {} + BlkioDeviceWriteBps: + - {} + BlkioDeviceReadIOps: + - {} + BlkioDeviceWriteIOps: + - {} + ContainerIDFile: "" + CpusetCpus: "" + CpusetMems: "" + CpuPercent: 80 + CpuShares: 0 + CpuPeriod: 100000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + Devices: [] + IpcMode: "" + LxcConf: [] + Memory: 0 + MemorySwap: 0 + MemoryReservation: 0 + KernelMemory: 0 + OomKillDisable: false + OomScoreAdj: 500 + NetworkMode: "bridge" + PidMode: "" + PortBindings: {} + Privileged: false + ReadonlyRootfs: false + PublishAllPorts: false + RestartPolicy: + MaximumRetryCount: 2 + Name: "on-failure" + LogConfig: + Type: "json-file" + Sysctls: + net.ipv4.ip_forward: "1" + Ulimits: + - {} + VolumeDriver: "" + ShmSize: 67108864 + HostnamePath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hostname" + HostsPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/hosts" + LogPath: "/var/lib/docker/containers/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b/1eb5fabf5a03807136561b3c00adcd2992b535d624d5e18b6cdc6a6844d9767b-json.log" + Id: "ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39" + Image: "04c5d3b7b0656168630d3ba35d8889bd0e9caafcaeb3004d2bfbc47e7c5d35d2" + MountLabel: "" + Name: "/boring_euclid" + NetworkSettings: + Bridge: "" + SandboxID: "" + HairpinMode: false + LinkLocalIPv6Address: "" + LinkLocalIPv6PrefixLen: 0 + SandboxKey: "" + SecondaryIPAddresses: null + SecondaryIPv6Addresses: null + EndpointID: "" + Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + IPAddress: "" + IPPrefixLen: 0 + IPv6Gateway: "" + MacAddress: "" + Networks: + bridge: + NetworkID: "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812" + EndpointID: "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d" + Gateway: "172.17.0.1" + IPAddress: "172.17.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Path: "/bin/sh" + ProcessLabel: "" + ResolvConfPath: "/var/lib/docker/containers/ba033ac4401106a3b513bc9d639eee123ad78ca3616b921167cd74b20e25ed39/resolv.conf" + RestartCount: 1 + State: + Error: "" + ExitCode: 9 + FinishedAt: "2015-01-06T15:47:32.080254511Z" + OOMKilled: false + Dead: false + Paused: false + Pid: 0 + Restarting: false + Running: true + StartedAt: "2015-01-06T15:47:32.072697474Z" + Status: "running" + Mounts: + - Name: "fac362...80535" + Source: "/data" + Destination: "/data" 
+ Driver: "local" + Mode: "ro,Z" + RW: false + Propagation: "" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "size" + in: "query" + type: "boolean" + default: false + description: "Return the size of container as fields `SizeRw` and `SizeRootFs`" + tags: + - "Container" + /containers/{id}/top: + get: + summary: "List processes running inside a container" + description: "On Unix systems, this is done by running the `ps` command. This endpoint is not supported on Windows." + operationId: "GetContainerTop" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Titles: + description: "The ps column titles" + type: "array" + items: + type: "string" + Processes: + description: "Each process running in the container, where each is process is an array of values corresponding to the titles" + type: "array" + items: + type: "array" + items: + type: "string" + examples: + application/json: + Titles: + - "UID" + - "PID" + - "PPID" + - "C" + - "STIME" + - "TTY" + - "TIME" + - "CMD" + Processes: + - + - "root" + - "13642" + - "882" + - "0" + - "17:03" + - "pts/0" + - "00:00:00" + - "/bin/bash" + - + - "root" + - "13735" + - "13642" + - "0" + - "17:06" + - "pts/0" + - "00:00:00" + - "sleep 10" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "ps_args" + in: "query" + description: "The arguments to pass to `ps`. For example, `aux`" + type: "string" + default: "-ef" + tags: + - "Container" + /containers/{id}/logs: + get: + summary: "Get container logs" + description: | + Get `stdout` and `stderr` logs from a container. + + Note: This endpoint works only for containers with the `json-file` or `journald` logging driver. + operationId: "GetContainerLogs" + responses: + 101: + description: "logs returned as a stream" + schema: + type: "string" + format: "binary" + 200: + description: "logs returned as a string in response body" + schema: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "follow" + in: "query" + description: | + Return the logs as a stream. + + This will return a `101` HTTP response with a `Connection: upgrade` header, then hijack the HTTP connection to send raw output. For more information about hijacking and the stream format, [see the documentation for the attach endpoint](#operation/PostContainerAttach). 
+ type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Return logs from `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Return logs from `stderr`" + type: "boolean" + default: false + - name: "since" + in: "query" + description: "Only return logs since this time, as a UNIX timestamp" + type: "integer" + default: 0 + - name: "timestamps" + in: "query" + description: "Add timestamps to every log line" + type: "boolean" + default: false + - name: "tail" + in: "query" + description: "Only return this number of log lines from the end of the logs. Specify as an integer or `all` to output all log lines." + type: "string" + default: "all" + tags: + - "Container" + /containers/{id}/changes: + get: + summary: "Get changes on a container’s filesystem" + description: | + Returns which files in a container's filesystem have been added, deleted, or modified. The `Kind` of modification can be one of: + + - `0`: Modified + - `1`: Added + - `2`: Deleted + operationId: "GetContainerChanges" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + type: "object" + properties: + Path: + description: "Path to file that has changed" + type: "string" + Kind: + description: "Kind of change" + type: "integer" + enum: + - 0 + - 1 + - 2 + examples: + application/json: + - Path: "/dev" + Kind: 0 + - Path: "/dev/kmsg" + Kind: 1 + - Path: "/test" + Kind: 1 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: + - "Container" + /containers/{id}/export: + get: + summary: "Export a container" + description: "Export the contents of a container as a tarball." + operationId: "GetContainerExport" + produces: + - "application/octet-stream" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: + - "Container" + /containers/{id}/stats: + get: + summary: "Get container stats based on resource usage" + description: | + This endpoint returns a live stream of a container’s resource usage statistics. + + The `precpu_stats` is the CPU statistic of last read, which is used for calculating the CPU usage percentage. It is not the same as the `cpu_stats` field. 
+ operationId: "GetContainerStats" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + examples: + application/json: + read: "2015-01-08T22:57:31.547920715Z" + pids_stats: + current: 3 + networks: + eth0: + rx_bytes: 5338 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 36 + tx_bytes: 648 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 8 + eth5: + rx_bytes: 4641 + rx_dropped: 0 + rx_errors: 0 + rx_packets: 26 + tx_bytes: 690 + tx_dropped: 0 + tx_errors: 0 + tx_packets: 9 + memory_stats: + stats: + total_pgmajfault: 0 + cache: 0 + mapped_file: 0 + total_inactive_file: 0 + pgpgout: 414 + rss: 6537216 + total_mapped_file: 0 + writeback: 0 + unevictable: 0 + pgpgin: 477 + total_unevictable: 0 + pgmajfault: 0 + total_rss: 6537216 + total_rss_huge: 6291456 + total_writeback: 0 + total_inactive_anon: 0 + rss_huge: 6291456 + hierarchical_memory_limit: 67108864 + total_pgfault: 964 + total_active_file: 0 + active_anon: 6537216 + total_active_anon: 6537216 + total_pgpgout: 414 + total_cache: 0 + inactive_anon: 0 + active_file: 0 + pgfault: 964 + inactive_file: 0 + total_pgpgin: 477 + max_usage: 6651904 + usage: 6537216 + failcnt: 0 + limit: 67108864 + blkio_stats: {} + cpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24472255 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100215355 + usage_in_kernelmode: 30000000 + system_cpu_usage: 739306590000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + precpu_stats: + cpu_usage: + percpu_usage: + - 8646879 + - 24350896 + - 36438778 + - 30657443 + usage_in_usermode: 50000000 + total_usage: 100093996 + usage_in_kernelmode: 30000000 + system_cpu_usage: 9492140000000 + throttling_data: + periods: 0 + throttled_periods: 0 + throttled_time: 0 + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "stream" + in: "query" + description: "Stream the output. If false, the stats will be output once and then it will disconnect." + type: "boolean" + default: true + tags: + - "Container" + /containers/{id}/resize: + post: + summary: "Resize a container TTY" + description: "Resize the TTY for a container. You must restart the container for the resize to take effect." 
+ operationId: "PostContainerResize" + consumes: + - "application/octet-stream" + produces: + - "text/plain" + responses: + 200: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "cannot resize container" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "h" + in: "query" + description: "Height of the tty session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the tty session in characters" + type: "integer" + tags: + - "Container" + /containers/{id}/start: + post: + summary: "Start a container" + operationId: "PostContainerStart" + responses: + 204: + description: "no error" + 304: + description: "container already started" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "detachKeys" + in: "query" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + type: "string" + tags: + - "Container" + /containers/{id}/stop: + post: + summary: "Stop a container" + operationId: "PostContainerStop" + responses: + 204: + description: "no error" + 304: + description: "container already stopped" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: + - "Container" + /containers/{id}/restart: + post: + summary: "Restart a container" + operationId: "PostContainerRestart" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "t" + in: "query" + description: "Number of seconds to wait before killing the container" + type: "integer" + tags: + - "Container" + /containers/{id}/kill: + post: + summary: "Kill a container" + description: "Send a POSIX signal to a container, defaulting to killing to the container." 
+ operationId: "PostContainerKill" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "signal" + in: "query" + description: "Signal to send to the container as an integer or string (e.g. `SIGINT`)" + type: "string" + default: "SIGKILL" + tags: + - "Container" + /containers/{id}/update: + post: + summary: "Update a container" + description: "Change various configuration options of a container without having to recreate it." + operationId: "ContainerUpdate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "The container has been updated." + schema: + type: "object" + properties: + Warnings: + type: "array" + items: + type: "string" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "update" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/Resources" + - type: "object" + properties: + RestartPolicy: + $ref: "#/definitions/RestartPolicy" + example: + BlkioWeight: 300 + CpuShares: 512 + CpuPeriod: 100000 + CpuQuota: 50000 + CpuRealtimePeriod: 1000000 + CpuRealtimeRuntime: 10000 + CpusetCpus: "0,1" + CpusetMems: "0" + Memory: 314572800 + MemorySwap: 514288000 + MemoryReservation: 209715200 + KernelMemory: 52428800 + RestartPolicy: + MaximumRetryCount: 4 + Name: "on-failure" + tags: + - "Container" + /containers/{id}/rename: + post: + summary: "Rename a container" + operationId: "PostContainerRename" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "name already in use" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "name" + in: "query" + required: true + description: "New name for the container" + type: "string" + tags: + - "Container" + /containers/{id}/pause: + post: + summary: "Pause a container" + description: | + Use the cgroups freezer to suspend all processes in a container. + + Traditionally, when suspending a process the `SIGSTOP` signal is used, which is observable by the process being suspended. With the cgroups freezer the process is unaware, and unable to capture, that it is being suspended, and subsequently resumed. 
+ operationId: "PostContainerPause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: + - "Container" + /containers/{id}/unpause: + post: + summary: "Unpause a container" + description: "Resume a container which has been paused." + operationId: "PostContainerUnpause" + responses: + 204: + description: "no error" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: + - "Container" + /containers/{id}/attach: + post: + summary: "Attach to a container" + description: | + Attach to a container to read its output or send it input. You can attach to the same container multiple times and you can reattach to containers that have been detached. + + Either the `stream` or `logs` parameter must be `true` for this endpoint to do anything. + + See [the documentation for the `docker attach` command](https://docs.docker.com/engine/reference/commandline/attach/) for more details. + + ### Hijacking + + This endpoint hijacks the HTTP connection to transport `stdin`, `stdout`, and `stderr` on the same socket. + + This is the response from the daemon for an attach request: + + ``` + HTTP/1.1 200 OK + Content-Type: application/vnd.docker.raw-stream + + [STREAM] + ``` + + After the headers and two new lines, the TCP connection can now be used for raw, bidirectional communication between the client and server. + + To hint potential proxies about connection hijacking, the Docker client can also optionally send connection upgrade headers. + + For example, the client sends this request to upgrade the connection: + + ``` + POST /containers/16253994b7c4/attach?stream=1&stdout=1 HTTP/1.1 + Upgrade: tcp + Connection: Upgrade + ``` + + The Docker daemon will respond with a `101 UPGRADED` response, and will similarly follow with the raw stream: + + ``` + HTTP/1.1 101 UPGRADED + Content-Type: application/vnd.docker.raw-stream + Connection: Upgrade + Upgrade: tcp + + [STREAM] + ``` + + ### Stream format + + When the TTY setting is disabled in [`POST /containers/create`](#operation/PostContainerCreate), the stream over the hijacked connected is multiplexed to separate out `stdout` and `stderr`. The stream consists of a series of frames, each containing a header and a payload. + + The header contains the information which the stream writes (`stdout` or `stderr`). It also contains the size of the associated frame encoded in the last four bytes (`uint32`). + + It is encoded on the first eight bytes like this: + + ```go + header := [8]byte{STREAM_TYPE, 0, 0, 0, SIZE1, SIZE2, SIZE3, SIZE4} + ``` + + `STREAM_TYPE` can be: + + - 0: `stdin` (is written on `stdout`) + - 1: `stdout` + - 2: `stderr` + + `SIZE1, SIZE2, SIZE3, SIZE4` are the four bytes of the `uint32` size encoded as big endian. + + Following the header is the payload, which is the specified number of bytes of `STREAM_TYPE`. 
+
+ The simplest way to implement this protocol is the following:
+
+ 1. Read 8 bytes.
+ 2. Choose `stdout` or `stderr` depending on the first byte.
+ 3. Extract the frame size from the last four bytes.
+ 4. Read the extracted size and output it on the correct output.
+ 5. Goto 1.
+
+ ### Stream format when using a TTY
+
+ When the TTY setting is enabled in [`POST /containers/create`](#operation/PostContainerCreate), the stream is not multiplexed. The data exchanged over the hijacked connection is simply the raw data from the process PTY and client's `stdin`.
+
+ operationId: "PostContainerAttach"
+ produces:
+ - "application/vnd.docker.raw-stream"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`."
+ type: "string"
+ - name: "logs"
+ in: "query"
+ description: |
+ Replay previous logs from the container.
+
+ This is useful for attaching to a container that has started and you want to output everything since the container started.
+
+ If `stream` is also enabled, once all the previous output has been returned, it will seamlessly transition into streaming current output.
+ type: "boolean"
+ default: false
+ - name: "stream"
+ in: "query"
+ description: "Stream attached streams from the time the request was made onwards"
+ type: "boolean"
+ default: false
+ - name: "stdin"
+ in: "query"
+ description: "Attach to `stdin`"
+ type: "boolean"
+ default: false
+ - name: "stdout"
+ in: "query"
+ description: "Attach to `stdout`"
+ type: "boolean"
+ default: false
+ - name: "stderr"
+ in: "query"
+ description: "Attach to `stderr`"
+ type: "boolean"
+ default: false
+ tags:
+ - "Container"
+ /containers/{id}/attach/ws:
+ get:
+ summary: "Attach to a container via a websocket"
+ operationId: "PostContainerAttachWebsocket"
+ responses:
+ 101:
+ description: "no error, hints proxy about hijacking"
+ 200:
+ description: "no error, no upgrade header found"
+ 400:
+ description: "bad parameter"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ 404:
+ description: "no such container"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ examples:
+ application/json:
+ message: "No such container: c2ada9df5af8"
+ 500:
+ description: "server error"
+ schema:
+ $ref: "#/definitions/ErrorResponse"
+ parameters:
+ - name: "id"
+ in: "path"
+ required: true
+ description: "ID or name of the container"
+ type: "string"
+ - name: "detachKeys"
+ in: "query"
+ description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,`, or `_`."
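The 8-byte header framing described for `/containers/{id}/attach` above maps directly onto a small read loop. A Go sketch of the suggested algorithm (read the header, pick the output from the first byte, read the big-endian length from the last four bytes); the two-frame input stream below is fabricated for demonstration:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
	"os"
)

// demux reads a multiplexed attach/logs stream from r and copies each
// frame's payload to stdout or stderr according to the STREAM_TYPE byte.
func demux(r io.Reader, stdout, stderr io.Writer) error {
	header := make([]byte, 8)
	for {
		if _, err := io.ReadFull(r, header); err != nil {
			if err == io.EOF {
				return nil // clean end of stream at a frame boundary
			}
			return err
		}
		size := binary.BigEndian.Uint32(header[4:8]) // SIZE1..SIZE4
		dst := stdout
		if header[0] == 2 { // 2 == stderr
			dst = stderr
		}
		if _, err := io.CopyN(dst, r, int64(size)); err != nil {
			return err
		}
	}
}

func main() {
	// Fabricated stream: "hi\n" on stdout, then "oops\n" on stderr.
	frame := func(t byte, s string) []byte {
		h := []byte{t, 0, 0, 0, 0, 0, 0, 0}
		binary.BigEndian.PutUint32(h[4:], uint32(len(s)))
		return append(h, s...)
	}
	stream := append(frame(1, "hi\n"), frame(2, "oops\n")...)
	if err := demux(bytes.NewReader(stream), os.Stdout, os.Stderr); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```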
+ type: "string" + - name: "logs" + in: "query" + description: "Return logs" + type: "boolean" + default: false + - name: "stream" + in: "query" + description: "Return stream" + type: "boolean" + default: false + - name: "stdin" + in: "query" + description: "Attach to `stdin`" + type: "boolean" + default: false + - name: "stdout" + in: "query" + description: "Attach to `stdout`" + type: "boolean" + default: false + - name: "stderr" + in: "query" + description: "Attach to `stderr`" + type: "boolean" + default: false + tags: + - "Container" + /containers/{id}/wait: + post: + summary: "Wait for a container" + description: "Block until a container stops, then returns the exit code." + operationId: "ContainerWait" + produces: ["application/json"] + responses: + 200: + description: "The container has exit." + schema: + type: "object" + required: [StatusCode] + properties: + StatusCode: + description: "Exit code of the container" + type: "integer" + x-nullable: false + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + tags: + - "Container" + /containers/{id}: + delete: + summary: "Remove a container" + operationId: "DeleteContainer" + responses: + 204: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "v" + in: "query" + description: "Remove the volumes associated with the container." + type: "boolean" + default: false + - name: "force" + in: "query" + description: "If the container is running, kill it before removing it." + type: "boolean" + default: false + tags: + - "Container" + /containers/{id}/archive: + head: + summary: "Get information about files in a container" + description: "A response header `X-Docker-Container-Path-Stat` is return containing a base64 - encoded JSON object with some filesystem header information about the path." + operationId: "HeadContainerArchive" + responses: + 200: + description: "no error" + headers: + X-Docker-Container-Path-Stat: + type: "string" + description: "TODO" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." 
+ type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: + - "Container" + get: + summary: "Get an archive of a filesystem resource in a container" + description: "Get an tar archive of a resource in the filesystem of container id." + operationId: "GetContainerArchive" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + 400: + description: "Bad parameter" + schema: + allOf: + - $ref: "#/definitions/ErrorResponse" + - type: "object" + properties: + message: + description: "The error message. Either \"must specify path parameter\" (path cannot be empty) or \"not a directory\" (path was asserted to be a directory but exists as a file)." + type: "string" + x-nullable: false + 404: + description: "Container or path does not exist" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Resource in the container’s filesystem to archive." + type: "string" + tags: + - "Container" + put: + summary: "Extract an archive of files or folders to a directory in a container" + description: "Upload a tar archive to be extracted to a path in the filesystem of container id." + operationId: "PutContainerArchive" + consumes: + - "application/x-tar" + - "application/octet-stream" + responses: + 200: + description: "The content was extracted successfully" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 403: + description: "Permission denied, the volume or container rootfs is marked as read-only." + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such container or path does not exist inside the container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + required: true + description: "ID or name of the container" + type: "string" + - name: "path" + in: "query" + required: true + description: "Path to a directory in the container to extract the archive’s contents into. " + type: "string" + - name: "noOverwriteDirNonDir" + in: "query" + description: "If “1”, “true”, or “True” then it will be an error if unpacking the given content would cause an existing directory to be replaced with a non-directory and vice versa." + type: "string" + - name: "inputStream" + in: "body" + required: true + description: "The input stream must be a tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." 
+ schema: + type: "string" + tags: + - "Container" + /containers/prune: + post: + summary: "Delete stopped containers" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "ContainerPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ContainersDeleted: + description: "Container IDs that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Container" + /images/json: + get: + summary: "List Images" + description: "Returns a list of images on the server. Note that it uses a different, smaller representation of an image than inspecting a single image." + operationId: "GetImageList" + produces: + - "application/json" + responses: + 200: + description: "Summary image data for the images matching the query" + schema: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + examples: + application/json: + - Id: "sha256:e216a057b1cb1efc11f8a268f37ef62083e70b1b38323ba252e25ac88904a7e8" + ParentId: "" + RepoTags: + - "ubuntu:12.04" + - "ubuntu:precise" + RepoDigests: + - "ubuntu@sha256:992069aee4016783df6345315302fa59681aae51a8eeb2f889dea59290f21787" + Created: 1474925151 + Size: 103579269 + VirtualSize: 103579269 + SharedSize: 0 + Labels: {} + Containers: 2 + - Id: "sha256:3e314f95dcace0f5e4fd37b10862fe8398e3c60ed36600bc0ca5fda78b087175" + ParentId: "" + RepoTags: + - "ubuntu:12.10" + - "ubuntu:quantal" + RepoDigests: + - "ubuntu@sha256:002fba3e3255af10be97ea26e476692a7ebed0bb074a9ab960b2e7a1526b15d7" + - "ubuntu@sha256:68ea0200f0b90df725d99d823905b04cf844f6039ef60c60bf3e019915017bd3" + Created: 1403128455 + Size: 172064416 + VirtualSize: 172064416 + SharedSize: 0 + Labels: {} + Containers: 5 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "all" + in: "query" + description: "Show all images. Only images from a final layer (no children) are shown by default." + type: "boolean" + default: false + - name: "filters" + in: "query" + description: "A JSON encoded value of the filters (a `map[string][]string`) to process on the containers list" + type: "string" + - name: "filter" + in: "query" + description: "Only return images with the specified name." + type: "string" + - name: "digests" + in: "query" + description: "Show digest information as a `RepoDigests` field on each image." + type: "boolean" + default: false + tags: + - "Image" + /build: + post: + summary: "Build an image" + description: | + Build an image from a tar archive with a `Dockerfile` in it. + + The `Dockerfile` specifies how the image is built from the tar archive. It is typically in the archive's root, but can be at a different path or have a different name by specifying the `dockerfile` parameter. [See the `Dockerfile` reference for more information](https://docs.docker.com/engine/reference/builder/). + + The Docker daemon performs a preliminary validation of the `Dockerfile` before starting the build, and returns an error if the syntax is incorrect. After that, each instruction is run one-by-one until the ID of the new image is output. + + The build is canceled if the client drops the connection by quitting or being killed. 
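A sketch of driving the `/build` endpoint described above: the build context travels as a tar body, while options such as `t` and `dockerfile` go in the query string. Everything below (daemon address, image name, Dockerfile contents) is illustrative:

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"os"
)

func main() {
	// Minimal build context: a single Dockerfile.
	dockerfile := []byte("FROM ubuntu\nCMD [\"date\"]\n")
	var ctxBuf bytes.Buffer
	tw := tar.NewWriter(&ctxBuf)
	tw.WriteHeader(&tar.Header{Name: "Dockerfile", Mode: 0644, Size: int64(len(dockerfile))})
	tw.Write(dockerfile)
	tw.Close()

	q := url.Values{}
	q.Set("t", "example:latest")      // name:tag for the resulting image
	q.Set("dockerfile", "Dockerfile") // default, shown for clarity

	resp, err := http.Post("http://localhost:2375/build?"+q.Encode(), "application/tar", &ctxBuf)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()

	// The daemon streams JSON progress messages; just echo them here.
	io.Copy(os.Stdout, resp.Body)
}
```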
+ operationId: "PostImageBuild" + consumes: + - "application/octet-stream" + produces: + - "application/json" + parameters: + - name: "inputStream" + in: "body" + description: "A tar archive compressed with one of the following algorithms: identity (no compression), gzip, bzip2, xz." + schema: + type: "string" + format: "binary" + - name: "dockerfile" + in: "query" + description: "Path within the build context to the `Dockerfile`. This is ignored if `remote` is specified and points to an external `Dockerfile`." + type: "string" + default: "Dockerfile" + - name: "t" + in: "query" + description: "A name and optional tag to apply to the image in the `name:tag` format. If you omit the tag the default `latest` value is assumed. You can provide several `t` parameters." + type: "string" + - name: "remote" + in: "query" + description: "A Git repository URI or HTTP/HTTPS context URI. If the URI points to a single text file, the file’s contents are placed into a file called `Dockerfile` and the image is built from that file. If the URI points to a tarball, the file is downloaded by the daemon and the contents therein used as the context for the build. If the URI points to a tarball and the `dockerfile` parameter is also specified, there must be a file with the corresponding path inside the tarball." + type: "string" + - name: "q" + in: "query" + description: "Suppress verbose build output." + type: "boolean" + default: false + - name: "nocache" + in: "query" + description: "Do not use the cache when building the image." + type: "boolean" + default: false + - name: "cachefrom" + in: "query" + description: "JSON array of images used for build cache resolution." + type: "string" + - name: "pull" + in: "query" + description: "Attempt to pull the image even if an older image exists locally." + type: "string" + - name: "rm" + in: "query" + description: "Remove intermediate containers after a successful build." + type: "boolean" + default: true + - name: "forcerm" + in: "query" + description: "Always remove intermediate containers, even upon failure." + type: "boolean" + default: false + - name: "memory" + in: "query" + description: "Set memory limit for build." + type: "integer" + - name: "memswap" + in: "query" + description: "Total memory (memory + swap). Set as `-1` to disable swap." + type: "integer" + - name: "cpushares" + in: "query" + description: "CPU shares (relative weight)." + type: "integer" + - name: "cpusetcpus" + in: "query" + description: "CPUs in which to allow execution (e.g., `0-3`, `0,1`)." + type: "string" + - name: "cpuperiod" + in: "query" + description: "The length of a CPU period in microseconds." + type: "integer" + - name: "cpuquota" + in: "query" + description: "Microseconds of CPU time that the container can get in a CPU period." + type: "integer" + - name: "buildargs" + in: "query" + description: "JSON map of string pairs for build-time variables. Users pass these values at build-time. Docker uses the buildargs as the environment context for commands run via the `Dockerfile` RUN instruction, or for variable expansion in other `Dockerfile` instructions. This is not meant for passing secret values. [Read more about the buildargs instruction.](https://docs.docker.com/engine/reference/builder/#arg)" + type: "integer" + - name: "shmsize" + in: "query" + description: "Size of `/dev/shm` in bytes. The size must be greater than 0. If omitted the system uses 64MB." 
+ type: "integer" + - name: "squash" + in: "query" + description: "Squash the resulting images layers into a single layer. *(Experimental release only.)*" + type: "boolean" + - name: "labels" + in: "query" + description: "Arbitrary key/value labels to set on the image, as a JSON map of string pairs." + type: "string" + - name: "networkmode" + in: "query" + description: "Sets the networking mode for the run commands during + build. Supported standard values are: `bridge`, `host`, `none`, and + `container:`. Any other value is taken as a custom network's + name to which this container should connect to." + type: "string" + - name: "Content-type" + in: "header" + type: "string" + enum: + - "application/tar" + default: "application/tar" + - name: "X-Registry-Config" + in: "header" + description: | + This is a base64-encoded JSON object with auth configurations for multiple registries that a build may refer to. + + The key is a registry URL, and the value is an auth configuration object, [as described in the authentication section](#section/Authentication). For example: + + ``` + { + "docker.example.com": { + "username": "janedoe", + "password": "hunter2" + }, + "https://index.docker.io/v1/": { + "username": "mobydock", + "password": "conta1n3rize14" + } + } + ``` + + Only the registry domain name (and port if not the default 443) are required. However, for legacy reasons, the Docker Hub registry must be specified with both a `https://` prefix and a `/v1/` suffix even though Docker will prefer to use the v2 registry API. + type: "string" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Image" + /images/create: + post: + summary: "Create an image" + description: "Create an image by either pulling it from a registry or importing it." + operationId: "PostImageCreate" + consumes: + - "text/plain" + - "application/octet-stream" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "fromImage" + in: "query" + description: "Name of the image to pull. The name may include a tag or digest. This parameter may only be used when pulling an image. The pull is cancelled if the HTTP connection is closed." + type: "string" + - name: "fromSrc" + in: "query" + description: "Source to import. The value may be a URL from which the image can be retrieved or `-` to read the image from the request body. This parameter may only be used when importing an image." + type: "string" + - name: "repo" + in: "query" + description: "Repository name given to an image when it is imported. The repo may include a tag. This parameter may only be used when importing an image." + type: "string" + - name: "tag" + in: "query" + description: "Tag or digest. If empty when pulling an image, this causes all tags for the given image to be pulled." + type: "string" + - name: "inputImage" + in: "body" + description: "Image content if the value `-` has been specified in fromSrc query parameter" + schema: + type: "string" + required: false + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: + - "Image" + /images/{name}/json: + get: + summary: "Inspect an image" + description: "Return low-level information about an image." 
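A short, hedged Go sketch of calling this endpoint: it issues `GET /images/{name}/json` and decodes a handful of the fields shown in the example response below. The daemon address and image name are illustrative assumptions.

```go
// Minimal sketch: inspect an image via GET /images/{name}/json and decode a few of the
// fields shown in the example response below. Daemon address and image name are assumptions.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type imageInspect struct {
	Id           string
	RepoTags     []string
	Architecture string
	Os           string
	Created      string
}

func main() {
	resp, err := http.Get("http://localhost:2375/images/ubuntu:latest/json")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusNotFound {
		panic("no such image")
	}

	var img imageInspect
	if err := json.NewDecoder(resp.Body).Decode(&img); err != nil {
		panic(err)
	}
	fmt.Printf("%s %v (%s/%s)\n", img.Id, img.RepoTags, img.Os, img.Architecture)
}
```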
+ operationId: "GetImageInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Image" + examples: + application/json: + Id: "sha256:85f05633ddc1c50679be2b16a0479ab6f7637f8884e0cfe0f4d20e1ebb3d6e7c" + Container: "cb91e48a60d01f1e27028b4fc6819f4f290b3cf12496c8176ec714d0d390984a" + Comment: "" + Os: "linux" + Architecture: "amd64" + Parent: "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + ContainerConfig: + Tty: false + Hostname: "e611e15f9c9d" + Domainname: "" + AttachStdout: false + PublishService: "" + AttachStdin: false + OpenStdin: false + StdinOnce: false + NetworkDisabled: false + OnBuild: [] + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + User: "" + WorkingDir: "" + MacAddress: "" + AttachStderr: false + Labels: + com.example.license: "GPL" + com.example.version: "1.0" + com.example.vendor: "Acme" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Cmd: + - "/bin/sh" + - "-c" + - "#(nop) LABEL com.example.vendor=Acme com.example.license=GPL com.example.version=1.0" + DockerVersion: "1.9.0-dev" + VirtualSize: 188359297 + Size: 0 + Author: "" + Created: "2015-09-10T08:30:53.26995814Z" + GraphDriver: + Name: "aufs" + RepoDigests: + - "localhost:5000/test/busybox/example@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf" + RepoTags: + - "example:1.0" + - "example:latest" + - "example:stable" + Config: + Image: "91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c" + NetworkDisabled: false + OnBuild: [] + StdinOnce: false + PublishService: "" + AttachStdin: false + OpenStdin: false + Domainname: "" + AttachStdout: false + Tty: false + Hostname: "e611e15f9c9d" + Cmd: + - "/bin/bash" + Env: + - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" + Labels: + com.example.vendor: "Acme" + com.example.version: "1.0" + com.example.license: "GPL" + MacAddress: "" + AttachStderr: false + WorkingDir: "" + User: "" + RootFS: + Type: "layers" + Layers: + - "sha256:1834950e52ce4d5a88a1bbd131c537f4d0e56d10ff0dd69e66be3b7dfa9df7e6" + - "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such image: someimage (tag: latest)" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or id" + type: "string" + required: true + tags: + - "Image" + /images/{name}/history: + get: + summary: "Get the history of an image" + description: "Return parent layers of an image." 
+ operationId: "GetImageHistory" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + Id: + type: "string" + Created: + type: "integer" + format: "int64" + CreatedBy: + type: "string" + Tags: + type: "array" + items: + type: "string" + Size: + type: "integer" + format: "int64" + Comment: + type: "string" + examples: + application/json: + - Id: "3db9c44f45209632d6050b35958829c3a2aa256d81b9a7be45b362ff85c54710" + Created: 1398108230 + CreatedBy: "/bin/sh -c #(nop) ADD file:eb15dbd63394e063b805a3c32ca7bf0266ef64676d5a6fab4801f2e81e2a5148 in /" + Tags: + - "ubuntu:lucid" + - "ubuntu:10.04" + Size: 182964289 + Comment: "" + - Id: "6cfa4d1f33fb861d4d114f43b25abd0ac737509268065cdfd69d544a59c85ab8" + Created: 1398108222 + CreatedBy: "/bin/sh -c #(nop) MAINTAINER Tianon Gravi - mkimage-debootstrap.sh -i iproute,iputils-ping,ubuntu-minimal -t lucid.tar.xz lucid http://archive.ubuntu.com/ubuntu/" + Tags: [] + Size: 0 + Comment: "" + - Id: "511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158" + Created: 1371157430 + CreatedBy: "" + Tags: + - "scratch12:latest" + - "scratch:latest" + Size: 0 + Comment: "Imported from -" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: + - "Image" + /images/{name}/push: + post: + summary: "Push an image" + description: | + Push an image to a registry. + + If you wish to push an image on to a private registry, that image must already have a tag which references the registry. For example, `registry.example.com/myimage:latest`. + + The push is cancelled if the HTTP connection is closed. + operationId: "PostImagePush" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID." + type: "string" + required: true + - name: "tag" + in: "query" + description: "The tag to associate with the image on the registry." + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration. [See the authentication section for details.](#section/Authentication)" + type: "string" + required: true + tags: + - "Image" + /images/{name}/tag: + post: + summary: "Tag an image" + description: "Tag an image so that it becomes part of a repository." + operationId: "PostImageTag" + responses: + 201: + description: "No error" + 400: + description: "Bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID to tag." + type: "string" + required: true + - name: "repo" + in: "query" + description: "The repository to tag in. For example, `someuser/someimage`." + type: "string" + - name: "tag" + in: "query" + description: "The name of the new tag." 
+ type: "string" + tags: + - "Image" + /images/{name}: + delete: + summary: "Remove an image" + description: | + Remove an image, along with any untagged parent images that were referenced by that image. + + Images can't be removed if they have descendant images, are being used by a running container or are being used by a build. + operationId: "DeleteImage" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + examples: + application/json: + - Untagged: "3e2f21a89f" + - Deleted: "3e2f21a89f" + - Deleted: "53b4f83ac9" + 404: + description: "No such image" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Conflict" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + - name: "force" + in: "query" + description: "Remove the image even if it is being used by stopped containers or has other tags" + type: "boolean" + default: false + - name: "noprune" + in: "query" + description: "Do not delete untagged parent images" + type: "boolean" + default: false + tags: + - "Image" + /images/search: + get: + summary: "Search images" + description: "Search for an image on Docker Hub." + operationId: "GetImageSearch" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + type: "object" + properties: + description: + type: "string" + is_official: + type: "boolean" + is_automated: + type: "boolean" + name: + type: "string" + star_count: + type: "integer" + examples: + application/json: + - description: "" + is_official: false + is_automated: false + name: "wma55/u1210sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "jdswinbank/sshd" + star_count: 0 + - description: "" + is_official: false + is_automated: false + name: "vgauthier/sshd" + star_count: 0 + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "term" + in: "query" + description: "Term to search" + type: "string" + required: true + - name: "limit" + in: "query" + description: "Maximum number of results to return" + type: "integer" + - name: "filters" + in: "query" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the images list. 
Available filters: + + - `stars=` + - `is-automated=(true|false)` + - `is-official=(true|false)` + type: "string" + tags: + - "Image" + /images/prune: + post: + summary: "Delete unused images" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "ImagePrune" + parameters: + - name: "body" + in: "body" + schema: + type: "object" + properties: + DanglingOnly: + description: "Only delete unused *and* untagged images" + type: "boolean" + default: false + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ImagesDeleted: + description: "Images that were deleted" + type: "array" + items: + $ref: "#/definitions/ImageDeleteResponse" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Image" + /auth: + post: + summary: "Check auth configuration" + description: "Validate credentials for a registry and, if available, get an identity token for accessing the registry without password." + operationId: "Authenticate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 200: + description: "An identity token was generated successfully." + schema: + type: "object" + required: [Status] + properties: + Status: + description: "The status of the authentication" + type: "string" + x-nullable: false + IdentityToken: + description: "An opaque token used to authenticate a user after a successful login" + type: "string" + x-nullable: false + examples: + application/json: + Status: "Login Succeeded" + IdentityToken: "9cbaf023786cd7..." + 204: + description: "No error" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "authConfig" + in: "body" + description: "Authentication to check" + schema: + $ref: "#/definitions/AuthConfig" + tags: ["Registry"] + /info: + get: + summary: "Get system information" + operationId: "getSystemInformation" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + Architecture: + type: "string" + Containers: + type: "integer" + ContainersRunning: + type: "integer" + ContainersStopped: + type: "integer" + ContainersPaused: + type: "integer" + CpuCfsPeriod: + type: "boolean" + CpuCfsQuota: + type: "boolean" + Debug: + type: "boolean" + DiscoveryBackend: + type: "string" + DockerRootDir: + type: "string" + Driver: + type: "string" + DriverStatus: + type: "array" + items: + type: "array" + items: + type: "string" + SystemStatus: + type: "array" + items: + type: "array" + items: + type: "string" + Plugins: + type: "object" + properties: + Volume: + type: "array" + items: + type: "string" + Network: + type: "array" + items: + type: "string" + ExperimentalBuild: + type: "boolean" + HttpProxy: + type: "string" + HttpsProxy: + type: "string" + ID: + type: "string" + IPv4Forwarding: + type: "boolean" + Images: + type: "integer" + IndexServerAddress: + type: "string" + InitPath: + type: "string" + InitSha1: + type: "string" + KernelVersion: + type: "string" + Labels: + type: "array" + items: + type: "string" + MemTotal: + type: "integer" + MemoryLimit: + type: "boolean" + NCPU: + type: "integer" + NEventsListener: + type: "integer" + NFd: + type: "integer" + NGoroutines: + type: "integer" + Name: + type: "string" + NoProxy: + type: "string" + OomKillDisable: + type: "boolean" + OSType: + type: "string" + OomScoreAdj: + type: "integer" + 
OperatingSystem: + type: "string" + RegistryConfig: + type: "object" + properties: + IndexConfigs: + type: "object" + additionalProperties: + type: "object" + properties: + Mirrors: + type: "array" + items: + type: "string" + Name: + type: "string" + Official: + type: "boolean" + Secure: + type: "boolean" + InsecureRegistryCIDRs: + type: "array" + items: + type: "string" + SwapLimit: + type: "boolean" + SystemTime: + type: "string" + ServerVersion: + type: "string" + examples: + application/json: + Architecture: "x86_64" + ClusterStore: "etcd://localhost:2379" + CgroupDriver: "cgroupfs" + Containers: 11 + ContainersRunning: 7 + ContainersStopped: 3 + ContainersPaused: 1 + CpuCfsPeriod: true + CpuCfsQuota: true + Debug: false + DockerRootDir: "/var/lib/docker" + Driver: "btrfs" + DriverStatus: + - + - "" + ExperimentalBuild: false + HttpProxy: "http://test:test@localhost:8080" + HttpsProxy: "https://test:test@localhost:8080" + ID: "7TRN:IPZB:QYBB:VPBQ:UMPP:KARE:6ZNR:XE6T:7EWV:PKF4:ZOJD:TPYS" + IPv4Forwarding: true + Images: 16 + IndexServerAddress: "https://index.docker.io/v1/" + InitPath: "/usr/bin/docker" + InitSha1: "" + KernelMemory: true + KernelVersion: "3.12.0-1-amd64" + Labels: + - "storage=ssd" + MemTotal: 2099236864 + MemoryLimit: true + NCPU: 1 + NEventsListener: 0 + NFd: 11 + NGoroutines: 21 + Name: "prod-server-42" + NoProxy: "9.81.1.160" + OomKillDisable: true + OSType: "linux" + OperatingSystem: "Boot2Docker" + Plugins: + Volume: + - "local" + Network: + - "null" + - "host" + - "bridge" + RegistryConfig: + IndexConfigs: + docker.io: + Name: "docker.io" + Official: true + Secure: true + InsecureRegistryCIDRs: + - "127.0.0.0/8" + SecurityOptions: + - Key: "Name" + Value: "seccomp" + - Key: "Profile" + Value: "default" + - Key: "Name" + Value: "apparmor" + - Key: "Name" + Value: "selinux" + - Key: "Name" + Value: "userns" + ServerVersion: "1.9.0" + SwapLimit: false + SystemStatus: + - + - "State" + - "Healthy" + SystemTime: "2015-03-10T11:11:23.730591467-07:00" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Misc" + /version: + get: + summary: "Get version" + description: "Returns the version of Docker that is running and various information about the system that Docker is running on." + operationId: "getVersion" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Version: + type: "string" + ApiVersion: + type: "string" + MinAPIVersion: + type: "string" + GitCommit: + type: "string" + GoVersion: + type: "string" + Os: + type: "string" + Arch: + type: "string" + KernelVersion: + type: "string" + Experimental: + type: "boolean" + BuildTime: + type: "string" + examples: + application/json: + Version: "1.13.0" + Os: "linux" + KernelVersion: "3.19.0-23-generic" + GoVersion: "go1.6.3" + GitCommit: "deadbee" + Arch: "amd64" + ApiVersion: "1.25" + MinAPIVersion: "1.12" + BuildTime: "2016-06-14T07:09:13.444803460+00:00" + Experimental: true + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Misc" + /_ping: + get: + summary: "Ping" + description: "This is a dummy endpoint you can use to test if the server is accessible." 
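As a quick illustration, the sketch below uses `/_ping` as a reachability probe and then decodes a few fields from `/version`, both documented above. The TCP daemon address is an assumption.

```go
// Minimal sketch: use /_ping as a reachability check, then read /version.
// Assumes a daemon on localhost:2375.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	ping, err := http.Get("http://localhost:2375/_ping")
	if err != nil {
		panic(err)
	}
	body, _ := ioutil.ReadAll(ping.Body)
	ping.Body.Close()
	fmt.Printf("ping: %d %s\n", ping.StatusCode, body) // expect 200 "OK"

	ver, err := http.Get("http://localhost:2375/version")
	if err != nil {
		panic(err)
	}
	defer ver.Body.Close()
	var v struct {
		Version    string
		ApiVersion string
		Os         string
		Arch       string
	}
	if err := json.NewDecoder(ver.Body).Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("docker %s (API %s) on %s/%s\n", v.Version, v.ApiVersion, v.Os, v.Arch)
}
```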
+ operationId: "ping" + produces: + - "text/plain" + responses: + 200: + description: "no error" + schema: + type: "string" + example: "OK" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Misc" + /commit: + post: + summary: "Create a new image from a container" + operationId: "PostImageCommit" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "containerConfig" + in: "body" + description: "The container configuration" + schema: + $ref: "#/definitions/Config" + - name: "container" + in: "query" + description: "The ID or name of the container to commit" + type: "string" + - name: "repo" + in: "query" + description: "Repository name for the created image" + type: "string" + - name: "tag" + in: "query" + description: "Tag name for the create image" + type: "string" + - name: "comment" + in: "query" + description: "Commit message" + type: "string" + - name: "author" + in: "query" + description: "Author of the image (e.g., `John Hannibal Smith `)" + type: "string" + - name: "pause" + in: "query" + description: "Whether to pause the container before committing" + type: "boolean" + default: true + - name: "changes" + in: "query" + description: "`Dockerfile` instructions to apply while committing" + type: "string" + tags: + - "Image" + /events: + get: + summary: "Monitor events" + description: | + Stream real-time events from the server. + + Various objects within Docker report events when something happens to them. 
+ + Containers report these events: `attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update` + + Images report these events: `delete, import, load, pull, push, save, tag, untag` + + Volumes report these events: `create, mount, unmount, destroy` + + Networks report these events: `create, connect, disconnect, destroy` + + The Docker daemon reports these events: `reload` + + operationId: "getEvents" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + Type: + description: "The type of object emitting the event" + type: "string" + Action: + description: "The type of event" + type: "string" + Actor: + type: "object" + properties: + ID: + description: "The ID of the object emitting the event" + type: "string" + Attributes: + description: "Various key/value attributes of the object, depending on its type" + type: "object" + additionalProperties: + type: "string" + time: + description: "Timestamp of event" + type: "integer" + timeNano: + description: "Timestamp of event, with nanosecond accuracy" + type: "integer" + format: "int64" + examples: + application/json: + Type: "container" + Action: "create" + Actor: + ID: "ede54ee1afda366ab42f824e8a5ffd195155d853ceaec74a927f249ea270c743" + Attributes: + com.example.some-label: "some-label-value" + image: "alpine" + name: "my-container" + time: 1461943101 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "since" + in: "query" + description: "Show events created since this timestamp then stream new events." + type: "string" + - name: "until" + in: "query" + description: "Show events created until this timestamp then stop streaming." + type: "string" + - name: "filters" + in: "query" + description: | + A JSON encoded value of filters (a `map[string][]string`) to process on the event list. 
Available filters: + + - `container=` container name or ID + - `event=` event type + - `image=` image name or ID + - `label=` image or container label + - `type=` object to filter by, one of `container`, `image`, `volume`, `network`, or `daemon` + - `volume=` volume name or ID + - `network=` network name or ID + - `daemon=` daemon name or ID + type: "string" + tags: + - "Misc" + /system/df: + get: + summary: "Get data usage information" + responses: + 200: + description: "no error" + schema: + type: "object" + properties: + LayersSize: + type: "integer" + format: "int64" + Images: + type: "array" + items: + $ref: "#/definitions/ImageSummary" + Containers: + type: "array" + items: + $ref: "#/definitions/ContainerSummary" + Volumes: + type: "array" + items: + $ref: "#/definitions/Volume" + example: + LayersSize: 1092588 + Images: + - + Id: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + ParentId: "" + RepoTags: + - "busybox:latest" + RepoDigests: + - "busybox@sha256:a59906e33509d14c036c8678d687bd4eec81ed7c4b8ce907b888c607f6a1e0e6" + Created: 1466724217 + Size: 1092588 + SharedSize: 0 + VirtualSize: 1092588 + Labels: {} + Containers: 1 + Containers: + - + Id: "e575172ed11dc01bfce087fb27bee502db149e1a0fad7c296ad300bbff178148" + Names: + - "/top" + Image: "busybox" + ImageID: "sha256:2b8fd9751c4c0f5dd266fcae00707e67a2545ef34f9a29354585f93dac906749" + Command: "top" + Created: 1472592424 + Ports: [] + SizeRootFs: 1092588 + Labels: {} + State: "exited" + Status: "Exited (0) 56 minutes ago" + HostConfig: + NetworkMode: "default" + NetworkSettings: + Networks: + bridge: + IPAMConfig: null + Links: null + Aliases: null + NetworkID: "d687bc59335f0e5c9ee8193e5612e8aee000c8c62ea170cfb99c098f95899d92" + EndpointID: "8ed5115aeaad9abb174f68dcf135b49f11daf597678315231a32ca28441dec6a" + Gateway: "172.18.0.1" + IPAddress: "172.18.0.2" + IPPrefixLen: 16 + IPv6Gateway: "" + GlobalIPv6Address: "" + GlobalIPv6PrefixLen: 0 + MacAddress: "02:42:ac:12:00:02" + Mounts: [] + Volumes: + - + Name: "my-volume" + Driver: "local" + Mountpoint: "" + Labels: null + Scope: "" + Options: null + UsageData: + Size: 0 + RefCount: 0 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Misc" + /images/{name}/get: + get: + summary: "Export an image" + description: | + Get a tarball containing all images and metadata for a repository. + + If `name` is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned. If `name` is an image ID, similarly only that image (and its parents) are returned, but with the exclusion of the `repositories` file in the tarball, as there were no image names referenced. + + ### Image tarball format + + An image tarball contains one directory per image layer (named using its long ID), each containing these files: + + - `VERSION`: currently `1.0` - the file format version + - `json`: detailed layer information, similar to `docker inspect layer_id` + - `layer.tar`: A tarfile containing the filesystem changes in this layer + + The `layer.tar` file contains `aufs` style `.wh..wh.aufs` files and directories for storing attribute changes and deletions. + + If the tarball defines a repository, the tarball should also include a `repositories` file at the root that contains a list of repository and tag names mapped to layer IDs. 
+ + ```json + { + "hello-world": { + "latest": "565a9d68a73f6706862bfe8409a7f659776d4d60a8d096eb4a3cbce6999cc2a1" + } + } + ``` + operationId: "GetImage" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "Image name or ID" + type: "string" + required: true + tags: + - "Image" + /images/get: + get: + summary: "Export several images" + description: | + Get a tarball containing all images and metadata for several image repositories. + + For each value of the `names` parameter: if it is a specific name and tag (e.g. `ubuntu:latest`), then only that image (and its parents) are returned; if it is an image ID, similarly only that image (and its parents) are returned and there would be no names referenced in the 'repositories' file for this image ID. + + For details on the format, see [the export image endpoint](#operation/GetImage). + operationId: "GetImageSaveAll" + produces: + - "application/x-tar" + responses: + 200: + description: "no error" + schema: + type: "string" + format: "binary" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "names" + in: "query" + description: "Image names to filter by" + type: "array" + items: + type: "string" + tags: + - "Image" + /images/load: + post: + summary: "Import images" + description: | + Load a set of images and tags into a repository. + + For details on the format, see [the export image endpoint](#operation/GetImage). + operationId: "PostImageLoad" + consumes: + - "application/x-tar" + produces: + - "application/json" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "imagesTarball" + in: "body" + description: "Tar archive containing images" + schema: + type: "string" + format: "binary" + - name: "quiet" + in: "query" + description: "Suppress progress details during load." + type: "boolean" + default: false + tags: + - "Image" + /containers/{id}/exec: + post: + summary: "Create an exec instance" + description: "Run a command inside a running container." + operationId: "PostContainerExec" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + $ref: "#/definitions/IdResponse" + 404: + description: "no such container" + schema: + $ref: "#/definitions/ErrorResponse" + examples: + application/json: + message: "No such container: c2ada9df5af8" + 409: + description: "container is paused" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execConfig" + in: "body" + description: "Exec configuration" + schema: + type: "object" + properties: + AttachStdin: + type: "boolean" + description: "Attach to `stdin` of the exec command." + AttachStdout: + type: "boolean" + description: "Attach to `stdout` of the exec command." + AttachStderr: + type: "boolean" + description: "Attach to `stderr` of the exec command." + DetachKeys: + type: "string" + description: "Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." 
+ Env: + description: "A list of environment variables in the form `[\"VAR=value\", ...]`." + type: "array" + items: + type: "string" + Cmd: + type: "array" + description: "Command to run, as a string or array of strings." + items: + type: "string" + Privileged: + type: "boolean" + description: "Runs the exec process with extended privileges." + default: false + User: + type: "string" + description: "The user, and optionally, group to run the exec process inside the container. Format is one of: `user`, `user:group`, `uid`, or `uid:gid`." + example: + AttachStdin: false + AttachStdout: true + AttachStderr: true + DetachKeys: "ctrl-p,ctrl-q" + Tty: false + Cmd: + - "date" + Env: + - "FOO=bar" + - "BAZ=quux" + required: true + - name: "id" + in: "path" + description: "ID or name of container" + type: "string" + required: true + tags: + - "Exec" + /exec/{id}/start: + post: + summary: "Start an exec instance" + description: "Starts a previously set up exec instance. If detach is true, this endpoint returns immediately after starting the command. Otherwise, it sets up an interactive session with the command." + operationId: "PostExecStart" + consumes: + - "application/json" + produces: + - "application/vnd.docker.raw-stream" + responses: + 200: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Container is stopped or paused" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "execStartConfig" + in: "body" + schema: + type: "object" + properties: + Detach: + type: "boolean" + description: "Detach from the command." + Tty: + type: "boolean" + description: "Allocate a pseudo-TTY." + example: + Detach: false + Tty: false + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: + - "Exec" + /exec/{id}/resize: + post: + summary: "Resize an exec instance" + description: "Resize the TTY session used by an exec instance. This endpoint only works if `tty` was specified as part of creating and starting the exec instance." + operationId: "PostExecResize" + responses: + 201: + description: "No error" + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + - name: "h" + in: "query" + description: "Height of the TTY session in characters" + type: "integer" + - name: "w" + in: "query" + description: "Width of the TTY session in characters" + type: "integer" + tags: + - "Exec" + /exec/{id}/json: + get: + summary: "Inspect an exec instance" + description: "Return low-level information about an exec instance." + operationId: "PostExecInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + ID: + type: "string" + Running: + type: "boolean" + ExitCode: + type: "integer" + ProcessConfig: + $ref: "#/definitions/ProcessConfig" + OpenStdin: + type: "boolean" + OpenStderr: + type: "boolean" + OpenStdout: + type: "boolean" + ContainerID: + type: "string" + Pid: + type: "integer" + description: "The system process ID for the exec process." 
+ examples: + application/json: + CanRemove: false + ContainerID: "b53ee82b53a40c7dca428523e34f741f3abc51d9f297a14ff874bf761b995126" + DetachKeys: "" + ExitCode: 2 + ID: "f33bbfb39f5b142420f4759b2348913bd4a8d1a6d7fd56499cb41a1bb91d7b3b" + OpenStderr: true + OpenStdin: true + OpenStdout: true + ProcessConfig: + arguments: + - "-c" + - "exit 2" + entrypoint: "sh" + privileged: false + tty: true + user: "1000" + Running: false + Pid: 42000 + 404: + description: "No such exec instance" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Exec instance ID" + required: true + type: "string" + tags: + - "Exec" + + /volumes: + get: + summary: "List volumes" + operationId: "VolumesList" + produces: ["application/json"] + responses: + 200: + description: "Summary volume data that matches the query" + schema: + type: "object" + required: [Volumes, Warnings] + properties: + Volumes: + type: "array" + x-nullable: false + description: "List of volumes" + items: + $ref: "#/definitions/Volume" + Warnings: + type: "array" + x-nullable: false + description: "Warnings that occurred when fetching the list of volumes" + items: + type: "string" + + examples: + application/json: + Volumes: + - Name: "tardis" + Driver: "local" + Mountpoint: "/var/lib/docker/volumes/tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Scope: "local" + Options: + device: "tmpfs" + o: "size=100m,uid=1000" + type: "tmpfs" + Warnings: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to + process on the volumes list. Available filters: + + - `name=` Matches all or part of a volume name. + - `dangling=` When set to `true` (or `1`), returns all + volumes that are not in use by a container. When set to `false` + (or `0`), only volumes that are in use by one or more + containers are returned. + - `driver=` Matches all or part of a volume + driver name. + type: "string" + format: "json" + tags: ["Volume"] + + /volumes/create: + post: + summary: "Create a volume" + operationId: "VolumesCreate" + consumes: ["application/json"] + produces: ["application/json"] + responses: + 201: + description: "The volume was created successfully" + schema: + $ref: "#/definitions/Volume" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "volumeConfig" + in: "body" + required: true + description: "Volume configuration" + schema: + type: "object" + properties: + Name: + description: "The new volume's name. If not specified, Docker generates a name." + type: "string" + x-nullable: false + Driver: + description: "Name of the volume driver to use." + type: "string" + default: "local" + x-nullable: false + DriverOpts: + description: "A mapping of driver options and values. These options are passed directly to the driver and are driver specific." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "tardis" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + Driver: "custom" + tags: ["Volume"] + + /volumes/{name}: + get: + summary: "Inspect a volume" + operationId: "VolumesInspect" + produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Volume" + 404: + description: "No such volume" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + tags: ["Volume"] + + delete: + summary: "Remove a volume" + description: "Instruct the driver to remove the volume." + operationId: "VolumesDelete" + responses: + 204: + description: "The volume was removed" + 404: + description: "No such volume or volume driver" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "Volume is in use and cannot be removed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + required: true + description: "Volume name or ID" + type: "string" + - name: "force" + in: "query" + description: "Force the removal of the volume" + type: "boolean" + default: false + tags: ["Volume"] + /volumes/prune: + post: + summary: "Delete unused volumes" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "VolumePrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Volumes that were deleted" + type: "array" + items: + type: "string" + SpaceReclaimed: + description: "Disk space reclaimed in bytes" + type: "integer" + format: "int64" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Volume" + /networks: + get: + summary: "List networks" + operationId: "NetworksList" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Network" + examples: + application/json: + - Name: "bridge" + Id: "f2de39df4171b0dc801e8002d1d999b77256983dfc63041c0f34030aa3977566" + Created: "2016-10-19T06:21:00.416543526Z" + Scope: "local" + Driver: "bridge" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: + - + Subnet: "172.17.0.0/16" + Containers: + 39b69226f9d79f5634485fb236a23b2fe4e96a0a94128390a7fbbcc167065867: + EndpointID: "ed2419a97c1d9954d05b46e462e7002ea552f216e9b136b80a7db8d98b442eda" + MacAddress: "02:42:ac:11:00:02" + IPv4Address: "172.17.0.2/16" + IPv6Address: "" + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + - Name: "none" + Id: "e086a3893b05ab69242d3c44e49483a3bbbd3a26b46baa8f61ab797c1088d794" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "null" + EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + - Name: "host" + Id: "13e871235c677f196c4e1ecebb9dc733b9b2d2ab589e30c539efeda84a24215e" + Created: "0001-01-01T00:00:00Z" + Scope: "local" + Driver: "host" + 
EnableIPv6: false + Internal: false + IPAM: + Driver: "default" + Config: [] + Containers: {} + Options: {} + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + JSON encoded value of the filters (a `map[string][]string`) to process on the networks list. Available filters: + + - `driver=` Matches a network's driver. + - `id=` Matches all or part of a network ID. + - `label=` or `label==` of a network label. + - `name=` Matches all or part of a network name. + - `type=["custom"|"builtin"]` Filters networks by type. The `custom` keyword returns all user-defined networks. + type: "string" + tags: ["Network"] + + /networks/{id}: + get: + summary: "Inspect a network" + operationId: "NetworksInspect" + produces: + - "application/json" + responses: + 200: + description: "No error" + schema: + $ref: "#/definitions/Network" + 404: + description: "Network not found" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + delete: + summary: "Remove a network" + operationId: "DeleteNetworks" + responses: + 204: + description: "No error" + 404: + description: "no such network" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + tags: ["Network"] + + /networks/create: + post: + summary: "Create a network" + operationId: "NetworksCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "No error" + schema: + type: "object" + properties: + Id: + description: "The ID of the created network." + type: "string" + Warning: + type: "string" + example: + Id: "22be93d5babb089c5aab8dbc369042fad48ff791584ca2da2100db837a1c7c30" + Warning: "" + 404: + description: "plugin not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "networkConfig" + in: "body" + description: "Network configuration" + required: true + schema: + type: "object" + required: ["Name"] + properties: + Name: + description: "The network's name." + type: "string" + CheckDuplicate: + description: "Check for networks with duplicate names." + type: "boolean" + Driver: + description: "Name of the network driver plugin to use." + type: "string" + default: "bridge" + Internal: + description: "Restrict external access to the network." + type: "boolean" + IPAM: + description: "Optional custom IP scheme for the network." + $ref: "#/definitions/IPAM" + EnableIPv6: + description: "Enable IPv6 on the network." + type: "boolean" + Options: + description: "Network specific options to be used by the drivers." + type: "object" + additionalProperties: + type: "string" + Labels: + description: "User-defined key/value metadata." 
+ type: "object" + additionalProperties: + type: "string" + example: + Name: "isolated_nw" + CheckDuplicate: false + Driver: "bridge" + EnableIPv6: true + IPAM: + Driver: "default" + Config: + - Subnet: "172.20.0.0/16" + IPRange: "172.20.10.0/24" + Gateway: "172.20.10.11" + - Subnet: "2001:db8:abcd::/64" + Gateway: "2001:db8:abcd::1011" + Options: + foo: "bar" + Internal: true + Options: + com.docker.network.bridge.default_bridge: "true" + com.docker.network.bridge.enable_icc: "true" + com.docker.network.bridge.enable_ip_masquerade: "true" + com.docker.network.bridge.host_binding_ipv4: "0.0.0.0" + com.docker.network.bridge.name: "docker0" + com.docker.network.driver.mtu: "1500" + Labels: + com.example.some-label: "some-value" + com.example.some-other-label: "some-other-value" + tags: ["Network"] + + /networks/{id}/connect: + post: + summary: "Connect a container to a network" + operationId: "NetworksConnect" + consumes: + - "application/octet-stream" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to connect to the network." + EndpointConfig: + $ref: "#/definitions/EndpointSettings" + example: + Container: "3613f73ba0e4" + EndpointConfig: + IPAMConfig: + IPv4Address: "172.24.56.89" + IPv6Address: "2001:db8::5689" + tags: ["Network"] + + /networks/{id}/disconnect: + post: + summary: "Disconnect a container from a network" + operationId: "NetworksDisconnect" + consumes: + - "application/json" + responses: + 200: + description: "No error" + 403: + description: "Operation not supported for swarm scoped networks" + schema: + $ref: "#/definitions/ErrorResponse" + 404: + description: "Network or container not found" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "Network ID or name" + required: true + type: "string" + - name: "container" + in: "body" + required: true + schema: + type: "object" + properties: + Container: + type: "string" + description: "The ID or name of the container to disconnect from the network." + Force: + type: "boolean" + description: "Force the container to disconnect from the network." + tags: ["Network"] + /networks/prune: + post: + summary: "Delete unused networks" + consumes: + - "application/json" + produces: + - "application/json" + operationId: "NetworkPrune" + responses: + 200: + description: "No error" + schema: + type: "object" + properties: + VolumesDeleted: + description: "Networks that were deleted" + type: "array" + items: + type: "string" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Network" + /plugins: + get: + summary: "List plugins" + operationId: "PluginsList" + description: "Returns information about installed plugins." 
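A small sketch of consuming this endpoint: `GET /plugins` returns a JSON array, decoded here into a minimal struct covering a few of the fields shown in the example below. The daemon address and the struct shape are illustrative assumptions.

```go
// Minimal sketch: list installed plugins via GET /plugins and decode a few fields
// from the example below. Assumes a daemon on localhost:2375.
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

type pluginSummary struct {
	Id     string
	Name   string
	Active bool
}

func main() {
	resp, err := http.Get("http://localhost:2375/plugins")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var plugins []pluginSummary
	if err := json.NewDecoder(resp.Body).Decode(&plugins); err != nil {
		panic(err)
	}
	for _, p := range plugins {
		fmt.Printf("%s (%s) active=%v\n", p.Name, p.Id, p.Active)
	}
}
```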
+ produces: ["application/json"] + responses: + 200: + description: "No error" + schema: + type: "array" + items: + $ref: "#/definitions/Plugin" + example: + - Id: "5724e2c8652da337ab2eedd19fc6fc0ec908e4bd907c7421bf6a8dfc70c4c078" + Name: "tiborvass/no-remove" + Tag: "latest" + Active: true + Config: + Mounts: + - Name: "" + Description: "" + Settable: null + Source: "/data" + Destination: "/data" + Type: "bind" + Options: + - "shared" + - "rbind" + - Name: "" + Description: "" + Settable: null + Source: null + Destination: "/foobar" + Type: "tmpfs" + Options: null + Env: + - "DEBUG=1" + Args: null + Devices: null + Manifest: + ManifestVersion: "v0" + Description: "A test plugin for Docker" + Documentation: "https://docs.docker.com/engine/extend/plugins/" + Interface: + Types: + - "docker.volumedriver/1.0" + Socket: "plugins.sock" + Entrypoint: + - "plugin-no-remove" + - "/data" + Workdir: "" + User: {} + Network: + Type: "host" + Capabilities: null + Mounts: + - Name: "" + Description: "" + Settable: null + Source: "/data" + Destination: "/data" + Type: "bind" + Options: + - "shared" + - "rbind" + - Name: "" + Description: "" + Settable: null + Source: null + Destination: "/foobar" + Type: "tmpfs" + Options: null + Devices: + - Name: "device" + Description: "a host device to mount" + Settable: null + Path: "/dev/cpu_dma_latency" + Env: + - Name: "DEBUG" + Description: "If set, prints debug messages" + Settable: null + Value: "1" + Args: + Name: "args" + Description: "command line arguments" + Settable: null + Value: [] + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: ["Plugins"] + + /plugins/pull: + post: + summary: "Install a plugin" + operationId: "PostPluginsPull" + description: | + Pulls and installs a plugin. After the plugin is installed, it can be enabled using the [`POST /plugins/{name}/enable` endpoint](#operation/PostPluginEnable). + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + description: "Describes a permission the user has to accept upon installing the plugin." + type: "object" + properties: + Name: + type: "string" + Description: + type: "string" + Value: + type: "array" + items: + type: "string" + example: + - Name: "network" + Description: "" + Value: + - "host" + - Name: "mount" + Description: "" + Value: + - "/data" + - Name: "device" + Description: "" + Value: + - "/dev/cpu_dma_latency" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: | + The plugin to install. + + The `:latest` tag is optional, and is used as the default if omitted. + required: true + type: "string" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration to use when pulling a plugin from a registry. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: + - "Plugins" + /plugins/{name}: + get: + summary: "Inspect a plugin" + operationId: "GetPluginsInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." 
+ required: true + type: "string" + tags: + - "Plugins" + delete: + summary: "Remove a plugin" + operationId: "DeletePlugins" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Plugin" + 404: + description: "plugin is not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "force" + in: "query" + description: "Disable the plugin before removing. This may result in issues if the plugin is in use by a container." + type: "boolean" + default: false + tags: + - "Plugins" + /plugins/{name}/enable: + post: + summary: "Enable a plugin" + operationId: "PostPluginsEnable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugins" + /plugins/{name}/disable: + post: + summary: "Disable a plugin" + operationId: "PostPluginsDisable" + responses: + 200: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + tags: + - "Plugins" + /plugins/create: + post: + summary: "Create a plugin" + operationId: "PostPluginsCreate" + consumes: + - "application/x-tar" + responses: + 204: + description: "no error" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "name" + in: "query" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "tarContext" + in: "body" + description: "Path to tar containing plugin rootfs and manifest" + schema: + type: "string" + format: "binary" + tags: + - "Plugins" + /plugins/{name}/set: + post: + summary: "Configure a plugin" + operationId: "PostPluginsSet" + consumes: + - "application/json" + parameters: + - name: "name" + in: "path" + description: "The name of the plugin. The `:latest` tag is optional, and is the default if omitted." + required: true + type: "string" + - name: "body" + in: "body" + schema: + type: "array" + items: + type: "string" + example: ["DEBUG=1"] + responses: + 204: + description: "No error" + 404: + description: "Plugin not installed" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "Server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Plugins" + /nodes: + get: + summary: "List nodes" + operationId: "GetNodesList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Node" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + description: | + Filters to process on the nodes list, encoded as JSON (a `map[string][]string`). 
+ + Available filters: + - `id=` + - `name=` + - `membership=`(`pending`|`accepted`|`rejected`)` + - `role=`(`worker`|`manager`)` + type: "string" + tags: + - "Nodes" + /nodes/{id}: + get: + summary: "Inspect a node" + operationId: "GetNodesInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Node" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + tags: + - "Nodes" + delete: + summary: "Delete a node" + operationId: "DeleteNodes" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "force" + in: "query" + description: "Force remove a node from the swarm" + default: false + type: "boolean" + tags: + - "Nodes" + /nodes/{id}/update: + post: + summary: "Update a node" + operationId: "PostNodesUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such node" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "The ID of the node" + type: "string" + required: true + - name: "body" + in: "body" + schema: + $ref: "#/definitions/NodeSpec" + - name: "version" + in: "query" + description: "The version number of the node object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + tags: + - "Nodes" + /swarm: + get: + summary: "Inspect swarm" + operationId: "GetSwarmInspect" + responses: + 200: + description: "no error" + schema: + allOf: + - $ref: "#/definitions/ClusterInfo" + - type: "object" + properties: + JoinTokens: + description: "The tokens workers and managers need to join the swarm." + type: "object" + properties: + Worker: + description: "The token workers can use to join the swarm." + type: "string" + Manager: + description: "The token managers can use to join the swarm." 
+ type: "string" + example: + CreatedAt: "2016-08-15T16:00:20.349727406Z" + Spec: + Dispatcher: + HeartbeatPeriod: 5000000000 + Orchestration: + TaskHistoryRetentionLimit: 10 + CAConfig: + NodeCertExpiry: 7776000000000000 + Raft: + LogEntriesForSlowFollowers: 500 + HeartbeatTick: 1 + SnapshotInterval: 10000 + ElectionTick: 3 + TaskDefaults: {} + Name: "default" + JoinTokens: + Worker: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-6qmn92w6bu3jdvnglku58u11a" + Manager: "SWMTKN-1-1h8aps2yszaiqmz2l3oc5392pgk8e49qhx2aj3nyv0ui0hez2a-8llk83c4wm9lwioey2s316r9l" + ID: "70ilmkj2f6sp2137c753w2nmt" + UpdatedAt: "2016-08-15T16:32:09.623207604Z" + Version: + Index: 51 + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + tags: + - "Swarm" + /swarm/init: + post: + summary: "Initialize a new swarm" + operationId: "PostSwarmInit" + produces: + - "application/json" + - "text/plain" + responses: + 200: + description: "no error" + schema: + description: "The node ID" + type: "string" + example: "7v2t30z9blmxuhnyo6s4cpenp" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP). This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the default swarm listening port is used." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + ForceNewCluster: + description: "Force creation of a new swarm." + type: "boolean" + Spec: + $ref: "#/definitions/SwarmSpec" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + ForceNewCluster: false + Spec: + Orchestration: {} + Raft: {} + Dispatcher: {} + CAConfig: {} + tags: + - "Swarm" + /swarm/join: + post: + summary: "Join an existing swarm" + operationId: "PostSwarmJoin" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is already part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + type: "object" + properties: + ListenAddr: + description: "Listen address used for inter-manager communication if the node gets promoted to manager, as well as determining the networking interface used for the VXLAN Tunnel Endpoint (VTEP)." + type: "string" + AdvertiseAddr: + description: "Externally reachable address advertised to other nodes. This can either be an address/port combination in the form `192.168.1.1:4567`, or an interface followed by a port number, like `eth0:4567`. 
If the port number is omitted, the port number from the listen address is used. If `AdvertiseAddr` is not specified, it will be automatically detected when possible." + type: "string" + RemoteAddrs: + description: "Addresses of manager nodes already participating in the swarm." + type: "string" + JoinToken: + description: "Secret token for joining this swarm." + type: "string" + example: + ListenAddr: "0.0.0.0:2377" + AdvertiseAddr: "192.168.1.1:2377" + RemoteAddrs: + - "node1:2377" + JoinToken: "SWMTKN-1-3pu6hszjas19xyp7ghgosyx9k8atbfcr8p2is99znpy26u2lkl-7p73s1dx5in4tatdymyhg9hu2" + tags: + - "Swarm" + /swarm/leave: + post: + summary: "Leave a swarm" + operationId: "PostSwarmLeave" + responses: + 200: + description: "no error" + 406: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "force" + description: "Force leave swarm, even if this is the last manager or that it will break the cluster." + in: "query" + type: "boolean" + default: false + tags: + - "Swarm" + /swarm/update: + post: + summary: "Update a swarm" + operationId: "PostSwarmUpdate" + responses: + 200: + description: "no error" + 400: + description: "bad parameter" + schema: + $ref: "#/definitions/ErrorResponse" + 406: + description: "node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + $ref: "#/definitions/SwarmSpec" + - name: "version" + in: "query" + description: "The version number of the swarm object being updated. This is required to avoid conflicting writes." + type: "integer" + format: "int64" + required: true + - name: "rotateWorkerToken" + in: "query" + description: "Rotate the worker join token." + type: "boolean" + default: false + - name: "rotateManagerToken" + in: "query" + description: "Rotate the manager join token." + type: "boolean" + default: false + tags: + - "Swarm" + /services: + get: + summary: "List services" + operationId: "GetServicesList" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Service" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the services list. Available filters: + + - `id=` + - `name=` + tags: + - "Services" + /services/create: + post: + summary: "Create a service" + operationId: "PostServicesCreate" + consumes: + - "application/json" + produces: + - "application/json" + responses: + 201: + description: "no error" + schema: + type: "object" + properties: + ID: + description: "The ID of the created service." 
+ type: "string" + example: + ID: "ak7w3gjqoa3kuz8xcpnyy0pvl" + 406: + description: "server error or node is not part of a swarm" + schema: + $ref: "#/definitions/ErrorResponse" + 409: + description: "name conflicts with an existing service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "web" + TaskTemplate: + ContainerSpec: + Image: "nginx:alpine" + Mounts: + - + ReadOnly: true + Source: "web-data" + Target: "/usr/share/nginx/html" + Type: "volume" + VolumeOptions: + DriverConfig: {} + Labels: + com.example.something: "something-value" + User: "33" + LogDriver: + Name: "json-file" + Options: + max-file: "3" + max-size: "10M" + Placement: {} + Resources: + Limits: + MemoryBytes: 104857600 + Reservations: {} + RestartPolicy: + Condition: "on-failure" + Delay: 10000000000 + MaxAttempts: 10 + Mode: + Replicated: + Replicas: 4 + UpdateConfig: + Delay: 30000000000 + Parallelism: 2 + FailureAction: "pause" + EndpointSpec: + Ports: + - + Protocol: "tcp" + PublishedPort: 8080 + TargetPort: 80 + Labels: + foo: "bar" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + tags: + - "Services" + /services/{id}: + get: + summary: "Inspect a service" + operationId: "GetServicesInspect" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Service" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: + - "Services" + delete: + summary: "Delete a service" + operationId: "DeleteServices" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + tags: + - "Services" + /services/{id}/update: + post: + summary: "Update a service" + operationId: "PostServicesUpdate" + responses: + 200: + description: "no error" + 404: + description: "no such service" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID or name of service." + required: true + type: "string" + - name: "body" + in: "body" + required: true + schema: + allOf: + - $ref: "#/definitions/ServiceSpec" + - type: "object" + example: + Name: "top" + TaskTemplate: + ContainerSpec: + Image: "busybox" + Args: + - "top" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ForceUpdate: 0 + Mode: + Replicated: + Replicas: 1 + UpdateConfig: + Parallelism: 1 + Monitor: 15000000000 + MaxFailureRatio: 0.15 + EndpointSpec: + Mode: "vip" + + - name: "version" + in: "query" + description: "The version number of the service object being updated. This is required to avoid conflicting writes." 
+ required: true + type: "integer" + - name: "registryAuthFrom" + in: "query" + type: "string" + description: "If the X-Registry-Auth header is not specified, this + parameter indicates where to find registry authorization credentials. The + valid values are `spec` and `previous-spec`." + default: "spec" + - name: "X-Registry-Auth" + in: "header" + description: "A base64-encoded auth configuration for pulling from private registries. [See the authentication section for details.](#section/Authentication)" + type: "string" + + tags: + - "Services" + /tasks: + get: + summary: "List tasks" + operationId: "GetTasksList" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + type: "array" + items: + $ref: "#/definitions/Task" + example: + - ID: "0kzzo1i0y4jz6027t0k7aezc7" + Version: + Index: 71 + CreatedAt: "2016-06-07T21:07:31.171892745Z" + UpdatedAt: "2016-06-07T21:07:31.376370513Z" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:31.290032978Z" + State: "running" + Message: "started" + ContainerStatus: + ContainerID: "e5d62702a1b48d01c3e02ca1e0212a250801fa8d67caca0b6f35919ebc12f035" + PID: 677 + DesiredState: "running" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.10/16" + - ID: "1yljwbmlr8er2waf8orvqpwms" + Version: + Index: 30 + CreatedAt: "2016-06-07T21:07:30.019104782Z" + UpdatedAt: "2016-06-07T21:07:30.231958098Z" + Name: "hopeful_cori" + Spec: + ContainerSpec: + Image: "redis" + Resources: + Limits: {} + Reservations: {} + RestartPolicy: + Condition: "any" + MaxAttempts: 0 + Placement: {} + ServiceID: "9mnpnzenvg8p8tdbtq4wvbkcz" + Slot: 1 + NodeID: "60gvrl6tm78dmak4yl7srz94v" + Status: + Timestamp: "2016-06-07T21:07:30.202183143Z" + State: "shutdown" + Message: "shutdown" + ContainerStatus: + ContainerID: "1cf8d63d18e79668b0004a4be4c6ee58cddfad2dae29506d8781581d0688a213" + DesiredState: "shutdown" + NetworksAttachments: + - Network: + ID: "4qvuz4ko70xaltuqbt8956gd1" + Version: + Index: 18 + CreatedAt: "2016-06-07T20:31:11.912919752Z" + UpdatedAt: "2016-06-07T21:07:29.955277358Z" + Spec: + Name: "ingress" + Labels: + com.docker.swarm.internal: "true" + DriverConfiguration: {} + IPAMOptions: + Driver: {} + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + DriverState: + Name: "overlay" + Options: + com.docker.network.driver.overlay.vxlanid_list: "256" + IPAMOptions: + Driver: + Name: "default" + Configs: + - Subnet: "10.255.0.0/16" + Gateway: "10.255.0.1" + Addresses: + - "10.255.0.5/16" + + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "filters" + in: "query" + type: "string" + description: | + A JSON encoded value of the filters (a `map[string][]string`) to process on the tasks list. 
Available filters: + + - `id=` + - `name=` + - `service=` + - `node=` + - `label=key` or `label="key=value"` + - `desired-state=(running | shutdown | accepted)` + tags: + - "Tasks" + /tasks/{id}: + get: + summary: "Inspect a task" + operationId: "GetTasksInspect" + produces: + - "application/json" + responses: + 200: + description: "no error" + schema: + $ref: "#/definitions/Task" + 404: + description: "no such task" + schema: + $ref: "#/definitions/ErrorResponse" + 500: + description: "server error" + schema: + $ref: "#/definitions/ErrorResponse" + parameters: + - name: "id" + in: "path" + description: "ID of the task" + required: true + type: "string" + tags: + - "Tasks" diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go index b4d6a3c01..67e4446c6 100644 --- a/vendor/github.com/docker/docker/api/types/client.go +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -4,6 +4,7 @@ import ( "bufio" "io" "net" + "os" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" @@ -12,8 +13,20 @@ import ( // CheckpointCreateOptions holds parameters to create a checkpoint from a container type CheckpointCreateOptions struct { - CheckpointID string - Exit bool + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string } // ContainerAttachOptions holds parameters to attach to a container. @@ -23,6 +36,7 @@ type ContainerAttachOptions struct { Stdout bool Stderr bool DetachKeys string + Logs bool } // ContainerCommitOptions holds parameters to commit changes into a container. @@ -41,18 +55,19 @@ type ContainerExecInspect struct { ContainerID string Running bool ExitCode int + Pid int } // ContainerListOptions holds parameters to list containers with. type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args } // ContainerLogsOptions holds parameters to filter logs with. @@ -75,7 +90,8 @@ type ContainerRemoveOptions struct { // ContainerStartOptions holds parameters to start containers. type ContainerStartOptions struct { - CheckpointID string + CheckpointID string + CheckpointDir string } // CopyToContainerOptions holds information @@ -140,6 +156,7 @@ type ImageBuildOptions struct { Memory int64 MemorySwap int64 CgroupParent string + NetworkMode string ShmSize int64 Dockerfile string Ulimits []*units.Ulimit @@ -185,9 +202,8 @@ type ImageImportOptions struct { // ImageListOptions holds parameters to filter the list of images with. type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args + All bool + Filters filters.Args } // ImageLoadResponse returns information to the client about a load process. @@ -251,7 +267,7 @@ func (v VersionResponse) ServerOK() bool { // NodeListOptions holds parameters to list nodes with. type NodeListOptions struct { - Filter filters.Args + Filters filters.Args } // NodeRemoveOptions holds parameters to remove nodes with. 
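For illustration only (not part of the vendored change), a minimal sketch of how a caller picks up the rename of the singular Filter field to Filters on the list options, assuming the vendored client's NewEnvClient and ContainerList entry points and a purely hypothetical label filter:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// ContainerListOptions (like NodeListOptions, ServiceListOptions and
	// TaskListOptions) now exposes a Filters field of type filters.Args
	// instead of the old Filter field.
	args := filters.NewArgs()
	args.Add("label", "example.role=worker") // hypothetical label filter

	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{Filters: args})
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Image)
	}
}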
@@ -301,15 +317,46 @@ type ServiceUpdateOptions struct { // ServiceListOptions holds parameters to list services with. type ServiceListOptions struct { - Filter filters.Args + Filters filters.Args } // TaskListOptions holds parameters to list tasks with. type TaskListOptions struct { - Filter filters.Args + Filters filters.Args } // PluginRemoveOptions holds parameters to remove plugins. type PluginRemoveOptions struct { Force bool } + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SecretRequestOption is a type for requesting secrets +type SecretRequestOption struct { + Source string + Target string + UID string + GID string + Mode os.FileMode +} + +// SwarmUnlockKeyResponse contains the response for Remote API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions hold all options to plugin create. +type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 000000000..d028e3b12 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,21 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody container create created body +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 000000000..81ee12c67 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody container update o k body +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 000000000..16cf33532 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,17 @@ +package container + +// ---------------------------------------------------------------------------- +// DO NOT EDIT 
THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBody container wait o k body +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go index caa137137..0c82d625e 100644 --- a/vendor/github.com/docker/docker/api/types/container/host_config.go +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -234,6 +234,7 @@ type Resources struct { // Applicable to all platforms CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. // Applicable to UNIX platforms CgroupParent string // Parent cgroup. @@ -243,8 +244,10 @@ type Resources struct { BlkioDeviceWriteBps []*blkiodev.ThrottleDevice BlkioDeviceReadIOps []*blkiodev.ThrottleDevice BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime CpusetCpus string // CpusetCpus 0-2, 0,1 CpusetMems string // CpusetMems 0-2, 0,1 Devices []DeviceMapping // List of devices to map inside the container diff --git a/vendor/github.com/docker/docker/api/types/container/secret.go b/vendor/github.com/docker/docker/api/types/container/secret.go new file mode 100644 index 000000000..a9bbd2f04 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/secret.go @@ -0,0 +1,14 @@ +package container + +import "os" + +// ContainerSecret represents a secret in a container. This gets realized +// in the container tmpfs +type ContainerSecret struct { + Name string + Target string + Data []byte + UID string + GID string + Mode os.FileMode +} diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 000000000..dc942d9d9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/errors.go b/vendor/github.com/docker/docker/api/types/errors.go deleted file mode 100644 index 649ab9513..000000000 --- a/vendor/github.com/docker/docker/api/types/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// ErrorResponse is the response body of API errors. 
-type ErrorResponse struct { - Message string `json:"message"` -} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 000000000..7592d2f8b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 000000000..e145b3dcf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go index 5516ed09d..8ac89402f 100644 --- a/vendor/github.com/docker/docker/api/types/mount/mount.go +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -1,24 +1,35 @@ package mount +import ( + "os" +) + // Type represents the type of a mount. type Type string +// Type constants const ( - // TypeBind BIND + // TypeBind is the type for mounting host dir TypeBind Type = "bind" - // TypeVolume VOLUME + // TypeVolume is the type for remote storage volumes TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" ) // Mount represents a mount (volume). type Mount struct { - Type Type `json:",omitempty"` + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) Source string `json:",omitempty"` Target string `json:",omitempty"` ReadOnly bool `json:",omitempty"` BindOptions *BindOptions `json:",omitempty"` VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` } // Propagation represents the propagation of a mount. @@ -56,3 +67,37 @@ type Driver struct { Name string `json:",omitempty"` Options map[string]string `json:",omitempty"` } + +// TmpfsOptions defines options specific to mounts of type "tmpfs". 
+type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be convered to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From docker/docker/pkg/mount/flags.go: + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. +} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go index 47080b652..832b3edb9 100644 --- a/vendor/github.com/docker/docker/api/types/network/network.go +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -28,6 +28,12 @@ type EndpointIPAMConfig struct { LinkLocalIPs []string `json:",omitempty"` } +// PeerInfo represents one peer of a overlay network +type PeerInfo struct { + Name string + IP string +} + // EndpointSettings stores the network endpoint details type EndpointSettings struct { // Configurations diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go index 11091ef27..d3fc41439 100644 --- a/vendor/github.com/docker/docker/api/types/plugin.go +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -1,168 +1,159 @@ package types -import ( - "encoding/json" - "fmt" -) - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Remote API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True when the plugin is running. False when the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` + + // tag + // Required: true + Tag string `json:"Tag"` } -// PluginConfig represents the values of settings potentially modifiable by a user +// PluginConfig The config of a plugin. +// swagger:model PluginConfig type PluginConfig struct { - Mounts []PluginMount - Env []string - Args []string - Devices []PluginDevice -} -// Plugin represents a Docker plugin for the remote API -type Plugin struct { - ID string `json:"Id,omitempty"` - Name string - Tag string - // Enabled is true when the plugin is running, is false when the plugin is not running, only installed. 
- Enabled bool - Config PluginConfig - Manifest PluginManifest -} + // args + // Required: true + Args PluginConfigArgs `json:"Args"` -// PluginsListResponse contains the response for the remote API -type PluginsListResponse []*Plugin - -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - -// PluginInterfaceType represents a type that a plugin implements. -type PluginInterfaceType struct { - Prefix string // This is always "docker" - Capability string // Capability should be validated against the above list. - Version string // Plugin API version. Depends on the capability -} + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} + // description + // Required: true + Description string `json:"Description"` -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} + // documentation + // Required: true + Documentation string `json:"Documentation"` -// PluginInterface describes the interface between Docker and plugin -type PluginInterface struct { - Types []PluginInterfaceType - Socket string -} + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` -// PluginSetting is to be embedded in other structs, if they are supposed to be -// modifiable by the user. 
-type PluginSetting struct { - Name string - Description string - Settable []string -} + // env + // Required: true + Env []PluginEnv `json:"Env"` -// PluginNetwork represents the network configuration for a plugin -type PluginNetwork struct { - Type string -} + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` -// PluginMount represents the mount configuration for a plugin -type PluginMount struct { - PluginSetting - Source *string - Destination string - Type string - Options []string -} + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` -// PluginEnv represents an environment variable for a plugin -type PluginEnv struct { - PluginSetting - Value *string -} + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // user + User PluginConfigUser `json:"User,omitempty"` -// PluginArgs represents the command line arguments for a plugin -type PluginArgs struct { - PluginSetting - Value []string + // workdir + // Required: true + Workdir string `json:"Workdir"` } -// PluginDevice represents a device for a plugin -type PluginDevice struct { - PluginSetting - Path *string +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` } -// PluginUser represents the user for the plugin's process -type PluginUser struct { - UID uint32 `json:"Uid,omitempty"` - GID uint32 `json:"Gid,omitempty"` +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` } -// PluginManifest represents the manifest of a plugin -type PluginManifest struct { - ManifestVersion string - Description string - Documentation string - Interface PluginInterface - Entrypoint []string - Workdir string - User PluginUser `json:",omitempty"` - Network PluginNetwork - Capabilities []string - Mounts []PluginMount - Devices []PluginDevice - Env []PluginEnv - Args PluginArgs +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` } -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` } -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 000000000..569901067 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 000000000..32962dc2e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 000000000..c82f204e8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 000000000..5c031cf8b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 000000000..8ef07d7fd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,64 @@ +package types + +import ( + "encoding/json" + "fmt" +) + +// PluginsListResponse contains the response for the remote API +type PluginsListResponse []*Plugin + +const ( + authzDriver = "AuthzDriver" + graphDriver = "GraphDriver" + ipamDriver = "IpamDriver" + networkDriver = "NetworkDriver" + volumeDriver = "VolumeDriver" +) + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 000000000..ad52d46d5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // IP + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 000000000..5e37d19bd --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go index c3eab8e97..9bf1928b8 100644 --- a/vendor/github.com/docker/docker/api/types/stats.go +++ b/vendor/github.com/docker/docker/api/types/stats.go @@ -170,6 +170,9 @@ type Stats struct { type StatsJSON struct { Stats + Name string `json:"name,omitempty"` + ID string `json:"id,omitempty"` + // Networks request version >=1.21 Networks map[string]NetworkStats `json:"networks,omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go index 4a84f2e53..4ab476ccc 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/container.go +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -3,20 +3,44 @@ package swarm import ( "time" + "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/mount" ) +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + // ContainerSpec represents the spec of a container. 
type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Groups []string `json:",omitempty"` - TTY bool `json:",omitempty"` - Mounts []mount.Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] + Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` } diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go index 76b0bea1b..2ba533911 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/network.go +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -31,8 +31,23 @@ type PortConfig struct { TargetPort uint32 `json:",omitempty"` // PublishedPort is the port on the swarm hosts PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` } +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + // PortConfigProtocol represents the protocol of a port. type PortConfigProtocol string diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go index 785e76e34..379e17a77 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/node.go +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -54,7 +54,7 @@ type NodeDescription struct { Engine EngineDescription `json:",omitempty"` } -// Platform represents the platfrom (Arch/OS). +// Platform represents the platform (Arch/OS). type Platform struct { Architecture string `json:",omitempty"` OS string `json:",omitempty"` @@ -77,6 +77,7 @@ type PluginDescription struct { type NodeStatus struct { State NodeState `json:",omitempty"` Message string `json:",omitempty"` + Addr string `json:",omitempty"` } // Reachability represents the reachability of a node. 
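For illustration only (not part of the vendored change), a minimal sketch of driving the newly vendored swarm fields (ContainerSpec.Hostname, DNSConfig, Hosts, and the host publish mode on PortConfig) through a service create call; NewEnvClient, ServiceCreate, and all names, addresses, and ports below are assumptions for the example:

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	replicas := uint64(2)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "example-web"}, // hypothetical service name
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image:    "nginx:alpine",
				Hostname: "web",                                 // newly vendored field
				Hosts:    []string{"10.0.0.10 backend.internal"}, // extra hosts entry, hosts(5) format
				DNSConfig: &swarm.DNSConfig{
					Nameservers: []string{"10.0.0.2"},
				},
			},
		},
		Mode: swarm.ServiceMode{Replicated: &swarm.ReplicatedService{Replicas: &replicas}},
		EndpointSpec: &swarm.EndpointSpec{
			Ports: []swarm.PortConfig{{
				Protocol:      swarm.PortConfigProtocolTCP,
				TargetPort:    80,
				PublishedPort: 8080,
				PublishMode:   swarm.PortConfigPublishModeHost, // newly vendored publish mode
			}},
		},
	}

	resp, err := cli.ServiceCreate(context.Background(), spec, types.ServiceCreateOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created service", resp.ID)
}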
diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 000000000..dbed63af8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,33 @@ +package swarm + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec + Digest string + SecretSize int64 +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + SecretID string + SecretName string + Target *SecretReferenceFileTarget +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go index e96d331ae..0b4221969 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/swarm.go +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -28,11 +28,12 @@ type JoinTokens struct { type Spec struct { Annotations - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` } // OrchestrationConfig represents orchestration configuration. @@ -53,6 +54,14 @@ type TaskDefaults struct { LogDriver *Driver `json:",omitempty"` } +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + // RaftConfig represents raft configuration. type RaftConfig struct { // SnapshotInterval is the number of log entries between snapshots. @@ -60,7 +69,7 @@ type RaftConfig struct { // KeepOldSnapshots is the number of snapshots to keep beyond the // current snapshot. - KeepOldSnapshots uint64 `json:",omitempty"` + KeepOldSnapshots *uint64 `json:",omitempty"` // LogEntriesForSlowFollowers is the number of log entries to keep // around to sync up slow followers after a snapshot is created. @@ -121,10 +130,11 @@ type ExternalCA struct { // InitRequest is the request used to init a swarm. type InitRequest struct { - ListenAddr string - AdvertiseAddr string - ForceNewCluster bool - Spec Spec + ListenAddr string + AdvertiseAddr string + ForceNewCluster bool + Spec Spec + AutoLockManagers bool } // JoinRequest is the request used to join a swarm. @@ -135,6 +145,12 @@ type JoinRequest struct { JoinToken string // accept by secret } +// UnlockRequest is the request used to unlock a swarm. +type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + // LocalNodeState represents the state of the local node. 
type LocalNodeState string @@ -147,6 +163,8 @@ const ( LocalNodeStateActive LocalNodeState = "active" // LocalNodeStateError ERROR LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" ) // Info represents generic information about swarm. @@ -173,6 +191,7 @@ type Peer struct { // UpdateFlags contains flags for SwarmUpdate. type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool } diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go index 591e07ba4..ace12cc89 100644 --- a/vendor/github.com/docker/docker/api/types/swarm/task.go +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -61,6 +61,10 @@ type TaskSpec struct { // spec. If not present, the one on cluster default on swarm.Spec will be // used, finally falling back to the engine default if not specified. LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 } // Resources represents resources (CPU/Memory). @@ -107,6 +111,7 @@ type TaskStatus struct { Message string `json:",omitempty"` Err string `json:",omitempty"` ContainerStatus ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` } // ContainerStatus represents the status of a container. @@ -115,3 +120,9 @@ type ContainerStatus struct { PID int `json:",omitempty"` ExitCode int `json:",omitempty"` } + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go index e74d57e50..41a902cbe 100644 --- a/vendor/github.com/docker/docker/api/types/types.go +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -6,6 +6,7 @@ import ( "time" "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" "github.com/docker/docker/api/types/mount" "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" @@ -13,54 +14,6 @@ import ( "github.com/docker/go-connections/nat" ) -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST "/containers/{name:.*}/update" -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login. 
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - // ContainerChange contains response of Remote API: // GET "/containers/{name:.*}/changes" type ContainerChange struct { @@ -86,21 +39,6 @@ type ImageDelete struct { Deleted string `json:",omitempty"` } -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - SharedSize int64 - VirtualSize int64 - Labels map[string]string - Containers int64 -} - // GraphDriverData returns Image's graph driver config info // when calling inspect command type GraphDriverData struct { @@ -138,15 +76,6 @@ type ImageInspect struct { RootFS RootFS } -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - // Container contains response of Remote API: // GET "/containers/json" type Container struct { @@ -200,11 +129,19 @@ type ContainerProcessList struct { Titles []string } +// Ping contains response of Remote API: +// GET "/_ping" +type Ping struct { + APIVersion string + Experimental bool +} + // Version contains response of Remote API: // GET "/version" type Version struct { Version string APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` GitCommit string GoVersion string Os string @@ -214,9 +151,16 @@ type Version struct { BuildTime string `json:",omitempty"` } -// Info contains response of Remote API: +// Commit records a external tool actual commit id version along the +// one expect by dockerd as set at build time +type Commit struct { + ID string + Expected string +} + +// InfoBase contains the base response of Remote API: // GET "/info" -type Info struct { +type InfoBase struct { ID string Containers int ContainersRunning int @@ -263,7 +207,6 @@ type Info struct { ServerVersion string ClusterStore string ClusterAdvertise string - SecurityOptions []string Runtimes map[string]Runtime DefaultRuntime string Swarm swarm.Info @@ -272,6 +215,22 @@ type Info struct { // running containers are detected LiveRestoreEnabled bool Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit +} + +// SecurityOpt holds key/value pair about a security option +type SecurityOpt struct { + Key, Value string +} + +// Info contains response of Remote API: +// GET "/info" +type Info struct { + *InfoBase + SecurityOptions []SecurityOpt } // PluginsInfo is a temp struct holding Plugins name @@ -304,9 +263,10 @@ type HealthcheckResult struct { // Health states const ( - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not 
yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem ) // Health stores information about the container's healthcheck results @@ -433,43 +393,11 @@ type MountPoint struct { Propagation mount.Propagation } -// VolumeUsageData holds information regarding the volume usage -type VolumeUsageData struct { - Size int64 // Size holds how much disk space is used by the (local driver only). Sets to -1 if not provided. - RefCount int // RefCount holds the number of containers having this volume attached to them. Sets to -1 if not provided. -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) - UsageData *VolumeUsageData `json:",omitempty"` -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the request for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - // NetworkResource is the body of the "get network" http response message type NetworkResource struct { Name string // Name is the requested name of the network ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level) Driver string // Driver is the Driver name used to create the network (e.g. 
`bridge`, `overlay`) EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 @@ -479,6 +407,7 @@ type NetworkResource struct { Containers map[string]EndpointResource // Containers contains endpoints belonging to the network Options map[string]string // Options holds the network specific options to use for when creating the network Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network } // EndpointResource contains network resources allocated and used for a container in a network @@ -541,7 +470,7 @@ type Runtime struct { // GET "/system/df" type DiskUsage struct { LayersSize int64 - Images []*Image + Images []*ImageSummary Containers []*Container Volumes []*Volume } @@ -562,6 +491,11 @@ type ContainersPruneConfig struct { type VolumesPruneConfig struct { } +// NetworksPruneConfig contains the configuration for Remote API: +// POST "/networks/prune" +type NetworksPruneConfig struct { +} + // ContainersPruneReport contains the response for Remote API: // POST "/containers/prune" type ContainersPruneReport struct { @@ -582,3 +516,21 @@ type ImagesPruneReport struct { ImagesDeleted []ImageDelete SpaceReclaimed uint64 } + +// NetworksPruneReport contains the response for Remote API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 000000000..f2db0cda7 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,58 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // A mapping of abitrary key/value data set on this volume. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level. + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData volume usage data +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. 
+ // Required: true + RefCount int64 `json:"RefCount"` + + // The disk space used by the volume (local driver only) + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_create.go b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go new file mode 100644 index 000000000..5f9513a22 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_create.go @@ -0,0 +1,29 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +// VolumesCreateBody volumes create body +// swagger:model VolumesCreateBody +type VolumesCreateBody struct { + + // Name of the volume driver to use. + // Required: true + Driver string `json:"Driver"` + + // A mapping of driver options and values. These options are passed directly to the driver and are driver specific. + // Required: true + DriverOpts map[string]string `json:"DriverOpts"` + + // A mapping of arbitrary key/value data to set on the volume. + // Required: true + Labels map[string]string `json:"Labels"` + + // The new volume's name. If not specified, Docker generates a name. + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/volume/volumes_list.go b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go new file mode 100644 index 000000000..7770bcb8f --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume/volumes_list.go @@ -0,0 +1,23 @@ +package volume + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/swagger-gen.sh +// ---------------------------------------------------------------------------- + +import "github.com/docker/docker/api/types" + +// VolumesListOKBody volumes list o k body +// swagger:model VolumesListOKBody +type VolumesListOKBody struct { + + // List of volumes + // Required: true + Volumes []*types.Volume `json:"Volumes"` + + // Warnings that occurred when fetching the list of volumes + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/client/checkpoint_delete.go b/vendor/github.com/docker/docker/client/checkpoint_delete.go index a4e9ed0c0..e6e75588b 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_delete.go +++ b/vendor/github.com/docker/docker/client/checkpoint_delete.go @@ -1,12 +1,20 @@ package client import ( + "net/url" + + "github.com/docker/docker/api/types" "golang.org/x/net/context" ) // CheckpointDelete deletes the checkpoint with the given name from the given container -func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, checkpointID string) error { - resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+checkpointID, nil, nil) +func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, options types.CheckpointDeleteOptions) error { + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+options.CheckpointID, query, nil) ensureReaderClosed(resp) return err } diff --git 
a/vendor/github.com/docker/docker/client/checkpoint_list.go b/vendor/github.com/docker/docker/client/checkpoint_list.go index bb471e005..8eb720a6b 100644 --- a/vendor/github.com/docker/docker/client/checkpoint_list.go +++ b/vendor/github.com/docker/docker/client/checkpoint_list.go @@ -2,16 +2,22 @@ package client import ( "encoding/json" + "net/url" "github.com/docker/docker/api/types" "golang.org/x/net/context" ) // CheckpointList returns the volumes configured in the docker host. -func (cli *Client) CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) { +func (cli *Client) CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) { var checkpoints []types.Checkpoint - resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", nil, nil) + query := url.Values{} + if options.CheckpointDir != "" { + query.Set("dir", options.CheckpointDir) + } + + resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", query, nil) if err != nil { return checkpoints, err } diff --git a/vendor/github.com/docker/docker/client/client.go b/vendor/github.com/docker/docker/client/client.go index 75073881c..76a1ac07c 100644 --- a/vendor/github.com/docker/docker/client/client.go +++ b/vendor/github.com/docker/docker/client/client.go @@ -58,7 +58,7 @@ import ( ) // DefaultVersion is the version of the current stable API -const DefaultVersion string = "1.23" +const DefaultVersion string = "1.25" // Client is the API client that performs all operations // against a docker server. @@ -79,6 +79,8 @@ type Client struct { version string // custom http headers configured by users. customHTTPHeaders map[string]string + // manualOverride is set to true when the version was set by users. + manualOverride bool } // NewEnvClient initializes a new API client based on environment variables. @@ -111,13 +113,19 @@ func NewEnvClient() (*Client, error) { if host == "" { host = DefaultDockerHost } - version := os.Getenv("DOCKER_API_VERSION") if version == "" { version = DefaultVersion } - return NewClient(host, version, client, nil) + cli, err := NewClient(host, version, client, nil) + if err != nil { + return cli, err + } + if version != "" { + cli.manualOverride = true + } + return cli, nil } // NewClient initializes a new API client for the given host and API version. @@ -168,6 +176,19 @@ func NewClient(host string, version string, client *http.Client, httpHeaders map }, nil } +// Close ensures that transport.Client is closed +// especially needed while using NewClient with *http.Client = nil +// for example +// client.NewClient("unix:///var/run/docker.sock", nil, "v1.18", map[string]string{"User-Agent": "engine-api-cli-1.0"}) +func (cli *Client) Close() error { + + if t, ok := cli.client.Transport.(*http.Transport); ok { + t.CloseIdleConnections() + } + + return nil +} + // getAPIPath returns the versioned request path to call the api. // It appends the query parameters to the path if they are not empty. func (cli *Client) getAPIPath(p string, query url.Values) string { @@ -198,7 +219,10 @@ func (cli *Client) ClientVersion() string { // UpdateClientVersion updates the version string associated with this // instance of the Client. func (cli *Client) UpdateClientVersion(v string) { - cli.version = v + if !cli.manualOverride { + cli.version = v + } + } // ParseHost verifies that the given host strings is valid. 
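[editor's note] The client.go hunk above bumps DefaultVersion to "1.25", adds a Close method, and treats an explicit DOCKER_API_VERSION as a manual override that UpdateClientVersion will no longer clobber. A minimal sketch of a caller exercising those pieces against this vendored client; the surrounding main/log plumbing is illustrative, not part of the patch:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
)

func main() {
	// NewEnvClient honours DOCKER_HOST and DOCKER_API_VERSION; with this
	// patch a non-empty DOCKER_API_VERSION is recorded as a manual
	// override, so later UpdateClientVersion calls become no-ops.
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close() // Close is new in this vendored revision

	fmt.Println("client API version:", cli.ClientVersion())
}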
diff --git a/vendor/github.com/docker/docker/client/container_attach.go b/vendor/github.com/docker/docker/client/container_attach.go index 7cfc860fc..eea468215 100644 --- a/vendor/github.com/docker/docker/client/container_attach.go +++ b/vendor/github.com/docker/docker/client/container_attach.go @@ -28,6 +28,9 @@ func (cli *Client) ContainerAttach(ctx context.Context, container string, option if options.DetachKeys != "" { query.Set("detachKeys", options.DetachKeys) } + if options.Logs { + query.Set("logs", "1") + } headers := map[string][]string{"Content-Type": {"text/plain"}} return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) diff --git a/vendor/github.com/docker/docker/client/container_commit.go b/vendor/github.com/docker/docker/client/container_commit.go index 363950cc2..c766d62e4 100644 --- a/vendor/github.com/docker/docker/client/container_commit.go +++ b/vendor/github.com/docker/docker/client/container_commit.go @@ -12,16 +12,16 @@ import ( ) // ContainerCommit applies changes into a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { +func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) { var repository, tag string if options.Reference != "" { distributionRef, err := distreference.ParseNamed(options.Reference) if err != nil { - return types.ContainerCommitResponse{}, err + return types.IDResponse{}, err } if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") + return types.IDResponse{}, errors.New("refusing to create a tag with a digest reference") } tag = reference.GetTagFromNamedRef(distributionRef) @@ -41,7 +41,7 @@ func (cli *Client) ContainerCommit(ctx context.Context, container string, option query.Set("pause", "0") } - var response types.ContainerCommitResponse + var response types.IDResponse resp, err := cli.post(ctx, "/commit", query, options.Config, nil) if err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_create.go b/vendor/github.com/docker/docker/client/container_create.go index a86217295..9f627aafa 100644 --- a/vendor/github.com/docker/docker/client/container_create.go +++ b/vendor/github.com/docker/docker/client/container_create.go @@ -5,7 +5,6 @@ import ( "net/url" "strings" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/network" "golang.org/x/net/context" @@ -19,8 +18,13 @@ type configWrapper struct { // ContainerCreate creates a new container based in the given configuration. // It can be associated with a name, but it's not mandatory. 
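[editor's note] The ContainerCreate rework that continues in the next hunk switches the return type to the generated container.ContainerCreateCreatedBody and gates Config.StopTimeout on API 1.25. A rough sketch of the new call shape, assuming this vendored client; the image and container name are placeholders:

package main

import (
	"fmt"
	"log"

	containertypes "github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// ContainerCreate now returns container.ContainerCreateCreatedBody
	// instead of types.ContainerCreateResponse.
	created, err := cli.ContainerCreate(context.Background(),
		&containertypes.Config{Image: "alpine"}, // placeholder image
		nil, nil, "example")                     // placeholder name
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created container", created.ID)
}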
-func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) { - var response types.ContainerCreateResponse +func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) { + var response container.ContainerCreateCreatedBody + + if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil { + return response, err + } + query := url.Values{} if containerName != "" { query.Set("name", containerName) diff --git a/vendor/github.com/docker/docker/client/container_exec.go b/vendor/github.com/docker/docker/client/container_exec.go index 34173d319..0665c54fb 100644 --- a/vendor/github.com/docker/docker/client/container_exec.go +++ b/vendor/github.com/docker/docker/client/container_exec.go @@ -8,8 +8,13 @@ import ( ) // ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) { - var response types.ContainerExecCreateResponse +func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) { + var response types.IDResponse + + if err := cli.NewVersionError("1.25", "env"); len(config.Env) != 0 && err != nil { + return response, err + } + resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) if err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_list.go b/vendor/github.com/docker/docker/client/container_list.go index a8945d84f..439891219 100644 --- a/vendor/github.com/docker/docker/client/container_list.go +++ b/vendor/github.com/docker/docker/client/container_list.go @@ -34,8 +34,8 @@ func (cli *Client) ContainerList(ctx context.Context, options types.ContainerLis query.Set("size", "1") } - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/container_prune.go b/vendor/github.com/docker/docker/client/container_prune.go index 0d8bd3292..3eabe71a7 100644 --- a/vendor/github.com/docker/docker/client/container_prune.go +++ b/vendor/github.com/docker/docker/client/container_prune.go @@ -12,6 +12,10 @@ import ( func (cli *Client) ContainersPrune(ctx context.Context, cfg types.ContainersPruneConfig) (types.ContainersPruneReport, error) { var report types.ContainersPruneReport + if err := cli.NewVersionError("1.25", "container prune"); err != nil { + return report, err + } + serverResp, err := cli.post(ctx, "/containers/prune", nil, cfg, nil) if err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/container_start.go b/vendor/github.com/docker/docker/client/container_start.go index 44bb0080c..b1f08de41 100644 --- a/vendor/github.com/docker/docker/client/container_start.go +++ b/vendor/github.com/docker/docker/client/container_start.go @@ -14,6 +14,9 @@ func (cli *Client) 
ContainerStart(ctx context.Context, containerID string, optio if len(options.CheckpointID) != 0 { query.Set("checkpoint", options.CheckpointID) } + if len(options.CheckpointDir) != 0 { + query.Set("checkpoint-dir", options.CheckpointDir) + } resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/client/container_stats.go b/vendor/github.com/docker/docker/client/container_stats.go index 3be7a988f..4758c66e3 100644 --- a/vendor/github.com/docker/docker/client/container_stats.go +++ b/vendor/github.com/docker/docker/client/container_stats.go @@ -21,6 +21,6 @@ func (cli *Client) ContainerStats(ctx context.Context, containerID string, strea return types.ContainerStats{}, err } - osType := GetDockerOS(resp.header.Get("Server")) + osType := getDockerOS(resp.header.Get("Server")) return types.ContainerStats{Body: resp.body, OSType: osType}, err } diff --git a/vendor/github.com/docker/docker/client/container_update.go b/vendor/github.com/docker/docker/client/container_update.go index 48b75bee3..5082f22df 100644 --- a/vendor/github.com/docker/docker/client/container_update.go +++ b/vendor/github.com/docker/docker/client/container_update.go @@ -3,14 +3,13 @@ package client import ( "encoding/json" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "golang.org/x/net/context" ) // ContainerUpdate updates resources of a container -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (types.ContainerUpdateResponse, error) { - var response types.ContainerUpdateResponse +func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) { + var response container.ContainerUpdateOKBody serverResp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) if err != nil { return response, err diff --git a/vendor/github.com/docker/docker/client/container_wait.go b/vendor/github.com/docker/docker/client/container_wait.go index 8a858f0ea..93212c70e 100644 --- a/vendor/github.com/docker/docker/client/container_wait.go +++ b/vendor/github.com/docker/docker/client/container_wait.go @@ -5,19 +5,19 @@ import ( "golang.org/x/net/context" - "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" ) // ContainerWait pauses execution until a container exits. // It returns the API status code as response of its readiness. 
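[editor's note] Several container calls in the hunks above change shape: ContainerListOptions now exposes Filters (renamed from Filter), ContainersPrune is gated on API 1.25, and ContainerStart accepts a checkpoint directory. A hedged usage sketch of the renamed field; the label filter value is made up for illustration:

package main

import (
	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

// listWithLabel shows the renamed Filters field on ContainerListOptions.
func listWithLabel(ctx context.Context, cli *client.Client) ([]types.Container, error) {
	args := filters.NewArgs()
	args.Add("label", "infrakit.group=workers") // illustrative label only

	return cli.ContainerList(ctx, types.ContainerListOptions{Filters: args})
}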
-func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) { +func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int64, error) { resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) if err != nil { return -1, err } defer ensureReaderClosed(resp) - var res types.ContainerWaitResponse + var res container.ContainerWaitOKBody if err := json.NewDecoder(resp.body).Decode(&res); err != nil { return -1, err } diff --git a/vendor/github.com/docker/docker/client/errors.go b/vendor/github.com/docker/docker/client/errors.go index ad1dadabb..94c22a728 100644 --- a/vendor/github.com/docker/docker/client/errors.go +++ b/vendor/github.com/docker/docker/client/errors.go @@ -3,6 +3,8 @@ package client import ( "errors" "fmt" + + "github.com/docker/docker/api/types/versions" ) // ErrConnectionFailed is an error raised when the connection between the client and the server failed. @@ -206,3 +208,34 @@ func IsErrPluginPermissionDenied(err error) bool { _, ok := err.(pluginPermissionDenied) return ok } + +// NewVersionError returns an error if the APIVersion required +// if less than the current supported version +func (cli *Client) NewVersionError(APIrequired, feature string) error { + if versions.LessThan(cli.version, APIrequired) { + return fmt.Errorf("%q requires API version %s, but the Docker server is version %s", feature, APIrequired, cli.version) + } + return nil +} + +// secretNotFoundError implements an error returned when a secret is not found. +type secretNotFoundError struct { + name string +} + +// Error returns a string representation of a secretNotFoundError +func (e secretNotFoundError) Error() string { + return fmt.Sprintf("Error: no such secret: %s", e.name) +} + +// NoFound indicates that this error type is of NotFound +func (e secretNotFoundError) NotFound() bool { + return true +} + +// IsErrSecretNotFound returns true if the error is caused +// when a secret is not found. 
+func IsErrSecretNotFound(err error) bool { + _, ok := err.(secretNotFoundError) + return ok +} diff --git a/vendor/github.com/docker/docker/client/hijack.go b/vendor/github.com/docker/docker/client/hijack.go index dededb7af..74c53f52b 100644 --- a/vendor/github.com/docker/docker/client/hijack.go +++ b/vendor/github.com/docker/docker/client/hijack.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "net" + "net/http" "net/http/httputil" "net/url" "strings" @@ -38,12 +39,14 @@ func (cli *Client) postHijacked(ctx context.Context, path string, query url.Valu return types.HijackedResponse{}, err } - req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) + apiPath := cli.getAPIPath(path, query) + req, err := http.NewRequest("POST", apiPath, bodyEncoded) if err != nil { return types.HijackedResponse{}, err } - req.Host = cli.addr + req = cli.addHeaders(req, headers) + req.Host = cli.addr req.Header.Set("Connection", "Upgrade") req.Header.Set("Upgrade", "tcp") diff --git a/vendor/github.com/docker/docker/client/image_build.go b/vendor/github.com/docker/docker/client/image_build.go index 3abd87025..6fde75dcf 100644 --- a/vendor/github.com/docker/docker/client/image_build.go +++ b/vendor/github.com/docker/docker/client/image_build.go @@ -6,7 +6,6 @@ import ( "io" "net/http" "net/url" - "regexp" "strconv" "golang.org/x/net/context" @@ -15,13 +14,11 @@ import ( "github.com/docker/docker/api/types/container" ) -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - // ImageBuild sends request to the daemon to build images. // The Body in the response implement an io.ReadCloser and it's up to the caller to // close it. func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := imageBuildOptionsToQuery(options) + query, err := cli.imageBuildOptionsToQuery(options) if err != nil { return types.ImageBuildResponse{}, err } @@ -39,7 +36,7 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio return types.ImageBuildResponse{}, err } - osType := GetDockerOS(serverResp.header.Get("Server")) + osType := getDockerOS(serverResp.header.Get("Server")) return types.ImageBuildResponse{ Body: serverResp.body, @@ -47,7 +44,7 @@ func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, optio }, nil } -func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { +func (cli *Client) imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { query := url.Values{ "t": options.Tags, "securityopt": options.SecurityOpt, @@ -76,6 +73,9 @@ func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, erro } if options.Squash { + if err := cli.NewVersionError("1.25", "squash"); err != nil { + return query, err + } query.Set("squash", "1") } @@ -84,6 +84,7 @@ func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, erro } query.Set("cpusetcpus", options.CPUSetCPUs) + query.Set("networkmode", options.NetworkMode) query.Set("cpusetmems", options.CPUSetMems) query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) @@ -120,13 +121,3 @@ func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, erro return query, nil } - -// GetDockerOS returns the operating system based on the server header from the daemon. 
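[editor's note] The errors.go additions above give callers two helpers: NewVersionError for feature gating against the negotiated API version, and IsErrSecretNotFound for typed not-found checks. A small sketch mirroring how the patch itself uses the version gate; the "secrets" feature label and the secret ID are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Refuse to proceed on daemons older than API 1.25, the same guard
	// this patch adds around squash, prune and similar features.
	if err := cli.NewVersionError("1.25", "secrets"); err != nil {
		log.Fatal(err)
	}

	// Typed not-found detection for secrets ("missing" is a made-up ID).
	if _, _, err := cli.SecretInspectWithRaw(context.Background(), "missing"); client.IsErrSecretNotFound(err) {
		fmt.Println("secret does not exist")
	}
}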
-func GetDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} diff --git a/vendor/github.com/docker/docker/client/image_list.go b/vendor/github.com/docker/docker/client/image_list.go index 00f27dc0c..63c71b1dd 100644 --- a/vendor/github.com/docker/docker/client/image_list.go +++ b/vendor/github.com/docker/docker/client/image_list.go @@ -10,8 +10,8 @@ import ( ) // ImageList returns a list of images in the docker host. -func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { - var images []types.Image +func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) { + var images []types.ImageSummary query := url.Values{} if options.Filters.Len() > 0 { @@ -21,10 +21,6 @@ func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions } query.Set("filters", filterJSON) } - if options.MatchName != "" { - // FIXME rename this parameter, to not be confused with the filters flag - query.Set("filter", options.MatchName) - } if options.All { query.Set("all", "1") } diff --git a/vendor/github.com/docker/docker/client/image_prune.go b/vendor/github.com/docker/docker/client/image_prune.go index f6752e504..d5e69d5b1 100644 --- a/vendor/github.com/docker/docker/client/image_prune.go +++ b/vendor/github.com/docker/docker/client/image_prune.go @@ -12,6 +12,10 @@ import ( func (cli *Client) ImagesPrune(ctx context.Context, cfg types.ImagesPruneConfig) (types.ImagesPruneReport, error) { var report types.ImagesPruneReport + if err := cli.NewVersionError("1.25", "image prune"); err != nil { + return report, err + } + serverResp, err := cli.post(ctx, "/images/prune", nil, cfg, nil) if err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/interface.go b/vendor/github.com/docker/docker/client/interface.go index de06b848a..7a3ebe8b4 100644 --- a/vendor/github.com/docker/docker/client/interface.go +++ b/vendor/github.com/docker/docker/client/interface.go @@ -11,6 +11,7 @@ import ( "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/registry" "github.com/docker/docker/api/types/swarm" + volumetypes "github.com/docker/docker/api/types/volume" "golang.org/x/net/context" ) @@ -20,8 +21,10 @@ type CommonAPIClient interface { ImageAPIClient NodeAPIClient NetworkAPIClient + PluginAPIClient ServiceAPIClient SwarmAPIClient + SecretAPIClient SystemAPIClient VolumeAPIClient ClientVersion() string @@ -32,11 +35,11 @@ type CommonAPIClient interface { // ContainerAPIClient defines API client methods for the containers type ContainerAPIClient interface { ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) + ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.IDResponse, error) + ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig 
*network.NetworkingConfig, containerName string) (container.ContainerCreateCreatedBody, error) ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) + ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.IDResponse, error) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error @@ -57,8 +60,8 @@ type ContainerAPIClient interface { ContainerStop(ctx context.Context, container string, timeout *time.Duration) error ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (types.ContainerUpdateResponse, error) - ContainerWait(ctx context.Context, container string) (int, error) + ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) (container.ContainerUpdateOKBody, error) + ContainerWait(ctx context.Context, container string) (int64, error) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error ContainersPrune(ctx context.Context, cfg types.ContainersPruneConfig) (types.ContainersPruneReport, error) @@ -71,7 +74,7 @@ type ImageAPIClient interface { ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) ImageInspectWithRaw(ctx context.Context, image string) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) + ImageList(ctx context.Context, options types.ImageListOptions) ([]types.ImageSummary, error) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) @@ -91,6 +94,7 @@ type NetworkAPIClient interface { NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) NetworkRemove(ctx context.Context, networkID string) error + NetworksPrune(ctx context.Context, cfg types.NetworksPruneConfig) (types.NetworksPruneReport, error) } // NodeAPIClient defines API client methods for the nodes @@ -101,6 +105,19 @@ type NodeAPIClient interface { NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error } +// PluginAPIClient defines API client methods for the plugins +type PluginAPIClient interface { + PluginList(ctx context.Context) (types.PluginsListResponse, error) + PluginRemove(ctx context.Context, name string, options 
types.PluginRemoveOptions) error + PluginEnable(ctx context.Context, name string) error + PluginDisable(ctx context.Context, name string) error + PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error + PluginPush(ctx context.Context, name string, registryAuth string) error + PluginSet(ctx context.Context, name string, args []string) error + PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + PluginCreate(ctx context.Context, createContext io.Reader, options types.PluginCreateOptions) error +} + // ServiceAPIClient defines API client methods for the services type ServiceAPIClient interface { ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) @@ -108,6 +125,7 @@ type ServiceAPIClient interface { ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) ServiceRemove(ctx context.Context, serviceID string) error ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error + ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) } @@ -116,6 +134,8 @@ type ServiceAPIClient interface { type SwarmAPIClient interface { SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error + SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) + SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error SwarmLeave(ctx context.Context, force bool) error SwarmInspect(ctx context.Context) (swarm.Swarm, error) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error @@ -125,16 +145,25 @@ type SwarmAPIClient interface { type SystemAPIClient interface { Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) Info(ctx context.Context) (types.Info, error) - RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) + RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) DiskUsage(ctx context.Context) (types.DiskUsage, error) + Ping(ctx context.Context) (types.Ping, error) } // VolumeAPIClient defines API client methods for the volumes type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) + VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) + VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) VolumeRemove(ctx context.Context, volumeID string, force bool) error VolumesPrune(ctx context.Context, cfg types.VolumesPruneConfig) (types.VolumesPruneReport, error) } + +// SecretAPIClient defines API client methods for secrets +type SecretAPIClient interface { + SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) + SecretCreate(ctx context.Context, secret 
swarm.SecretSpec) (types.SecretCreateResponse, error) + SecretRemove(ctx context.Context, id string) error + SecretInspectWithRaw(ctx context.Context, name string) (swarm.Secret, []byte, error) +} diff --git a/vendor/github.com/docker/docker/client/interface_experimental.go b/vendor/github.com/docker/docker/client/interface_experimental.go index 1ddc517c9..51da98ecd 100644 --- a/vendor/github.com/docker/docker/client/interface_experimental.go +++ b/vendor/github.com/docker/docker/client/interface_experimental.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( @@ -7,31 +5,13 @@ import ( "golang.org/x/net/context" ) -// APIClient is an interface that clients that talk with a docker server must implement. -type APIClient interface { - CommonAPIClient +type apiClientExperimental interface { CheckpointAPIClient - PluginAPIClient } // CheckpointAPIClient defines API client methods for the checkpoints type CheckpointAPIClient interface { CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error - CheckpointDelete(ctx context.Context, container string, checkpointID string) error - CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) -} - -// PluginAPIClient defines API client methods for the plugins -type PluginAPIClient interface { - PluginList(ctx context.Context) (types.PluginsListResponse, error) - PluginRemove(ctx context.Context, name string, options types.PluginRemoveOptions) error - PluginEnable(ctx context.Context, name string) error - PluginDisable(ctx context.Context, name string) error - PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error - PluginPush(ctx context.Context, name string, registryAuth string) error - PluginSet(ctx context.Context, name string, args []string) error - PluginInspectWithRaw(ctx context.Context, name string) (*types.Plugin, []byte, error) + CheckpointDelete(ctx context.Context, container string, options types.CheckpointDeleteOptions) error + CheckpointList(ctx context.Context, container string, options types.CheckpointListOptions) ([]types.Checkpoint, error) } - -// Ensure that Client always implements APIClient. -var _ APIClient = &Client{} diff --git a/vendor/github.com/docker/docker/client/interface_stable.go b/vendor/github.com/docker/docker/client/interface_stable.go index 496f522d5..cc90a3cbb 100644 --- a/vendor/github.com/docker/docker/client/interface_stable.go +++ b/vendor/github.com/docker/docker/client/interface_stable.go @@ -1,10 +1,9 @@ -// +build !experimental - package client // APIClient is an interface that clients that talk with a docker server must implement. type APIClient interface { CommonAPIClient + apiClientExperimental } // Ensure that Client always implements APIClient. diff --git a/vendor/github.com/docker/docker/client/login.go b/vendor/github.com/docker/docker/client/login.go index d8d277ccb..600dc7196 100644 --- a/vendor/github.com/docker/docker/client/login.go +++ b/vendor/github.com/docker/docker/client/login.go @@ -6,22 +6,23 @@ import ( "net/url" "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/registry" "golang.org/x/net/context" ) // RegistryLogin authenticates the docker server with a given docker registry. // It returns UnauthorizerError when the authentication fails. 
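[editor's note] The interface changes above move volume creation and listing onto the generated volume types (VolumesCreateBody and VolumesListOKBody). A sketch of the updated call shapes; the volume name is a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types/filters"
	volumetypes "github.com/docker/docker/api/types/volume"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// VolumeCreate now takes the swagger-generated VolumesCreateBody.
	vol, err := cli.VolumeCreate(ctx, volumetypes.VolumesCreateBody{
		Name:   "scratch", // placeholder name
		Driver: "local",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", vol.Name, "at", vol.Mountpoint)

	// VolumeList now returns VolumesListOKBody with Volumes and Warnings.
	list, err := cli.VolumeList(ctx, filters.NewArgs())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(list.Volumes), "volumes,", len(list.Warnings), "warnings")
}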
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) { +func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (registry.AuthenticateOKBody, error) { resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil) if resp.statusCode == http.StatusUnauthorized { - return types.AuthResponse{}, unauthorizedError{err} + return registry.AuthenticateOKBody{}, unauthorizedError{err} } if err != nil { - return types.AuthResponse{}, err + return registry.AuthenticateOKBody{}, err } - var response types.AuthResponse + var response registry.AuthenticateOKBody err = json.NewDecoder(resp.body).Decode(&response) ensureReaderClosed(resp) return response, err diff --git a/vendor/github.com/docker/docker/client/network_prune.go b/vendor/github.com/docker/docker/client/network_prune.go new file mode 100644 index 000000000..01185f2e0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/network_prune.go @@ -0,0 +1,26 @@ +package client + +import ( + "encoding/json" + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// NetworksPrune requests the daemon to delete unused networks +func (cli *Client) NetworksPrune(ctx context.Context, cfg types.NetworksPruneConfig) (types.NetworksPruneReport, error) { + var report types.NetworksPruneReport + + serverResp, err := cli.post(ctx, "/networks/prune", nil, cfg, nil) + if err != nil { + return report, err + } + defer ensureReaderClosed(serverResp) + + if err := json.NewDecoder(serverResp.body).Decode(&report); err != nil { + return report, fmt.Errorf("Error retrieving network prune report: %v", err) + } + + return report, nil +} diff --git a/vendor/github.com/docker/docker/client/node_list.go b/vendor/github.com/docker/docker/client/node_list.go index 0716875cc..3e8440f08 100644 --- a/vendor/github.com/docker/docker/client/node_list.go +++ b/vendor/github.com/docker/docker/client/node_list.go @@ -14,8 +14,8 @@ import ( func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { query := url.Values{} - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filter) + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) if err != nil { return nil, err diff --git a/vendor/github.com/docker/docker/client/ping.go b/vendor/github.com/docker/docker/client/ping.go new file mode 100644 index 000000000..22dcda24f --- /dev/null +++ b/vendor/github.com/docker/docker/client/ping.go @@ -0,0 +1,30 @@ +package client + +import ( + "fmt" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// Ping pings the server and return the value of the "Docker-Experimental" & "API-Version" headers +func (cli *Client) Ping(ctx context.Context) (types.Ping, error) { + var ping types.Ping + req, err := cli.buildRequest("GET", fmt.Sprintf("%s/_ping", cli.basePath), nil, nil) + if err != nil { + return ping, err + } + serverResp, err := cli.doRequest(ctx, req) + if err != nil { + return ping, err + } + defer ensureReaderClosed(serverResp) + + ping.APIVersion = serverResp.header.Get("API-Version") + + if serverResp.header.Get("Docker-Experimental") == "true" { + ping.Experimental = true + } + + return ping, nil +} diff --git a/vendor/github.com/docker/docker/client/plugin_create.go b/vendor/github.com/docker/docker/client/plugin_create.go new file mode 100644 index 000000000..a660ba573 --- /dev/null +++ 
b/vendor/github.com/docker/docker/client/plugin_create.go @@ -0,0 +1,26 @@ +package client + +import ( + "io" + "net/http" + "net/url" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// PluginCreate creates a plugin +func (cli *Client) PluginCreate(ctx context.Context, createContext io.Reader, createOptions types.PluginCreateOptions) error { + headers := http.Header(make(map[string][]string)) + headers.Set("Content-Type", "application/tar") + + query := url.Values{} + query.Set("name", createOptions.RepoName) + + resp, err := cli.postRaw(ctx, "/plugins/create", query, createContext, headers) + if err != nil { + return err + } + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/plugin_disable.go b/vendor/github.com/docker/docker/client/plugin_disable.go index 893fc6e82..51e456512 100644 --- a/vendor/github.com/docker/docker/client/plugin_disable.go +++ b/vendor/github.com/docker/docker/client/plugin_disable.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/plugin_enable.go b/vendor/github.com/docker/docker/client/plugin_enable.go index 84422abc7..8109814dd 100644 --- a/vendor/github.com/docker/docker/client/plugin_enable.go +++ b/vendor/github.com/docker/docker/client/plugin_enable.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/plugin_inspect.go b/vendor/github.com/docker/docker/client/plugin_inspect.go index 7ba8db57a..e9474b5a9 100644 --- a/vendor/github.com/docker/docker/client/plugin_inspect.go +++ b/vendor/github.com/docker/docker/client/plugin_inspect.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/plugin_install.go b/vendor/github.com/docker/docker/client/plugin_install.go index 9ee32eea9..d0a3d517f 100644 --- a/vendor/github.com/docker/docker/client/plugin_install.go +++ b/vendor/github.com/docker/docker/client/plugin_install.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( @@ -47,9 +45,17 @@ func (cli *Client) PluginInstall(ctx context.Context, name string, options types return pluginPermissionDenied{name} } } + + if len(options.Args) > 0 { + if err := cli.PluginSet(ctx, name, options.Args); err != nil { + return err + } + } + if options.Disabled { return nil } + return cli.PluginEnable(ctx, name) } diff --git a/vendor/github.com/docker/docker/client/plugin_list.go b/vendor/github.com/docker/docker/client/plugin_list.go index 48b470247..88c480a3e 100644 --- a/vendor/github.com/docker/docker/client/plugin_list.go +++ b/vendor/github.com/docker/docker/client/plugin_list.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/plugin_push.go b/vendor/github.com/docker/docker/client/plugin_push.go index 3afea5ed7..d83bbdc35 100644 --- a/vendor/github.com/docker/docker/client/plugin_push.go +++ b/vendor/github.com/docker/docker/client/plugin_push.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/plugin_remove.go b/vendor/github.com/docker/docker/client/plugin_remove.go index 1483f2854..b017e4d34 100644 --- a/vendor/github.com/docker/docker/client/plugin_remove.go +++ 
b/vendor/github.com/docker/docker/client/plugin_remove.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/plugin_set.go b/vendor/github.com/docker/docker/client/plugin_set.go index fb40f38b2..3260d2a90 100644 --- a/vendor/github.com/docker/docker/client/plugin_set.go +++ b/vendor/github.com/docker/docker/client/plugin_set.go @@ -1,5 +1,3 @@ -// +build experimental - package client import ( diff --git a/vendor/github.com/docker/docker/client/request.go b/vendor/github.com/docker/docker/client/request.go index bfd62bad1..ac0536365 100644 --- a/vendor/github.com/docker/docker/client/request.go +++ b/vendor/github.com/docker/docker/client/request.go @@ -38,21 +38,29 @@ func (cli *Client) get(ctx context.Context, path string, query url.Values, heade // postWithContext sends an http request to the docker API using the method POST with a specific go context. func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "POST", path, query, obj, headers) + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "POST", path, query, body, headers) } func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendClientRequest(ctx, "POST", path, query, body, headers) + return cli.sendRequest(ctx, "POST", path, query, body, headers) } // put sends an http request to the docker API using the method PUT. func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - return cli.sendRequest(ctx, "PUT", path, query, obj, headers) + body, headers, err := encodeBody(obj, headers) + if err != nil { + return serverResponse{}, err + } + return cli.sendRequest(ctx, "PUT", path, query, body, headers) } // put sends an http request to the docker API using the method PUT. func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - return cli.sendClientRequest(ctx, "PUT", path, query, body, headers) + return cli.sendRequest(ctx, "PUT", path, query, body, headers) } // delete sends an http request to the docker API using the method DELETE. 
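[editor's note] Alongside the request plumbing reworked in the hunks below, this patch adds a Ping call (see the new ping.go earlier) that surfaces the daemon's API-Version and Docker-Experimental response headers. A short sketch of reading them:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Ping hits GET /_ping and copies the API-Version and
	// Docker-Experimental headers into types.Ping.
	ping, err := cli.Ping(context.Background())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("daemon API %s, experimental=%v\n", ping.APIVersion, ping.Experimental)
}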
@@ -60,39 +68,35 @@ func (cli *Client) delete(ctx context.Context, path string, query url.Values, he return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) } -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (serverResponse, error) { - var body io.Reader +type headers map[string][]string - if obj != nil { - var err error - body, err = encodeData(obj) - if err != nil { - return serverResponse{}, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} +func encodeBody(obj interface{}, headers headers) (io.Reader, headers, error) { + if obj == nil { + return nil, headers, nil } - return cli.sendClientRequest(ctx, method, path, query, body, headers) -} - -func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (serverResponse, error) { - serverResp := serverResponse{ - body: nil, - statusCode: -1, + body, err := encodeData(obj) + if err != nil { + return nil, headers, err } + if headers == nil { + headers = make(map[string][]string) + } + headers["Content-Type"] = []string{"application/json"} + return body, headers, nil +} +func (cli *Client) buildRequest(method, path string, body io.Reader, headers headers) (*http.Request, error) { expectedPayload := (method == "POST" || method == "PUT") if expectedPayload && body == nil { body = bytes.NewReader([]byte{}) } - req, err := cli.newRequest(method, path, query, body, headers) + req, err := http.NewRequest(method, path, body) if err != nil { - return serverResp, err + return nil, err } + req = cli.addHeaders(req, headers) if cli.proto == "unix" || cli.proto == "npipe" { // For local communications, it doesn't matter what the host is. We just @@ -106,6 +110,19 @@ func (cli *Client) sendClientRequest(ctx context.Context, method, path string, q if expectedPayload && req.Header.Get("Content-Type") == "" { req.Header.Set("Content-Type", "text/plain") } + return req, nil +} + +func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers headers) (serverResponse, error) { + req, err := cli.buildRequest(method, cli.getAPIPath(path, query), body, headers) + if err != nil { + return serverResponse{}, err + } + return cli.doRequest(ctx, req) +} + +func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResponse, error) { + serverResp := serverResponse{statusCode: -1} resp, err := ctxhttp.Do(ctx, cli.client, req) if err != nil { @@ -143,6 +160,20 @@ func (cli *Client) sendClientRequest(ctx context.Context, method, path string, q } } + // Although there's not a strongly typed error for this in go-winio, + // lots of people are using the default configuration for the docker + // daemon on Windows where the daemon is listening on a named pipe + // `//./pipe/docker_engine, and the client must be running elevated. + // Give users a clue rather than the not-overly useful message + // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.25/info: + // open //./pipe/docker_engine: The system cannot find the file specified.`. 
+ // Note we can't string compare "The system cannot find the file specified" as + // this is localised - for example in French the error would be + // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.` + if strings.Contains(err.Error(), `open //./pipe/docker_engine`) { + err = errors.New(err.Error() + " In the default daemon configuration on Windows, the docker client must be run elevated to connect. This error may also indicate that the docker daemon is not running.") + } + return serverResp, errors.Wrap(err, "error during connect") } @@ -179,16 +210,13 @@ func (cli *Client) sendClientRequest(ctx context.Context, method, path string, q return serverResp, nil } -func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest(method, apiPath, body) - if err != nil { - return nil, err - } - +func (cli *Client) addHeaders(req *http.Request, headers headers) *http.Request { // Add CLI Config's HTTP Headers BEFORE we set the Docker headers // then the user can't change OUR headers for k, v := range cli.customHTTPHeaders { + if versions.LessThan(cli.version, "1.25") && k == "User-Agent" { + continue + } req.Header.Set(k, v) } @@ -197,8 +225,7 @@ func (cli *Client) newRequest(method, path string, query url.Values, body io.Rea req.Header[k] = v } } - - return req, nil + return req } func encodeData(data interface{}) (*bytes.Buffer, error) { diff --git a/vendor/github.com/docker/docker/client/secret_create.go b/vendor/github.com/docker/docker/client/secret_create.go new file mode 100644 index 000000000..f92a3d151 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_create.go @@ -0,0 +1,24 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretCreate creates a new Secret. 
+func (cli *Client) SecretCreate(ctx context.Context, secret swarm.SecretSpec) (types.SecretCreateResponse, error) { + var headers map[string][]string + + var response types.SecretCreateResponse + resp, err := cli.post(ctx, "/secrets", nil, secret, headers) + if err != nil { + return response, err + } + + err = json.NewDecoder(resp.body).Decode(&response) + ensureReaderClosed(resp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/secret_inspect.go b/vendor/github.com/docker/docker/client/secret_inspect.go new file mode 100644 index 000000000..f77457611 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_inspect.go @@ -0,0 +1,34 @@ +package client + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/http" + + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretInspectWithRaw returns the secret information with raw data +func (cli *Client) SecretInspectWithRaw(ctx context.Context, id string) (swarm.Secret, []byte, error) { + resp, err := cli.get(ctx, "/secrets/"+id, nil, nil) + if err != nil { + if resp.statusCode == http.StatusNotFound { + return swarm.Secret{}, nil, secretNotFoundError{id} + } + return swarm.Secret{}, nil, err + } + defer ensureReaderClosed(resp) + + body, err := ioutil.ReadAll(resp.body) + if err != nil { + return swarm.Secret{}, nil, err + } + + var secret swarm.Secret + rdr := bytes.NewReader(body) + err = json.NewDecoder(rdr).Decode(&secret) + + return secret, body, err +} diff --git a/vendor/github.com/docker/docker/client/secret_list.go b/vendor/github.com/docker/docker/client/secret_list.go new file mode 100644 index 000000000..7e9d5ec16 --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_list.go @@ -0,0 +1,35 @@ +package client + +import ( + "encoding/json" + "net/url" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SecretList returns the list of secrets. +func (cli *Client) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { + query := url.Values{} + + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) + if err != nil { + return nil, err + } + + query.Set("filters", filterJSON) + } + + resp, err := cli.get(ctx, "/secrets", query, nil) + if err != nil { + return nil, err + } + + var secrets []swarm.Secret + err = json.NewDecoder(resp.body).Decode(&secrets) + ensureReaderClosed(resp) + return secrets, err +} diff --git a/vendor/github.com/docker/docker/client/secret_remove.go b/vendor/github.com/docker/docker/client/secret_remove.go new file mode 100644 index 000000000..1955b988a --- /dev/null +++ b/vendor/github.com/docker/docker/client/secret_remove.go @@ -0,0 +1,10 @@ +package client + +import "golang.org/x/net/context" + +// SecretRemove removes a Secret. 
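[editor's note] The new secret_*.go files above give the client a full secret round trip. A hedged sketch of create, list and remove; the SecretSpec shape (Annotations.Name plus raw Data) is assumed from the swarm types of this API revision, and the name and payload are placeholders:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/swarm"
	"github.com/docker/docker/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Create a secret; SecretCreate returns the new secret's ID.
	created, err := cli.SecretCreate(ctx, swarm.SecretSpec{
		Annotations: swarm.Annotations{Name: "db-password"}, // placeholder name
		Data:        []byte("s3cret"),                       // placeholder payload
	})
	if err != nil {
		log.Fatal(err)
	}

	// List, then remove by the ID returned from create.
	secrets, err := cli.SecretList(ctx, types.SecretListOptions{})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(secrets), "secrets")

	if err := cli.SecretRemove(ctx, created.ID); err != nil {
		log.Fatal(err)
	}
}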
+func (cli *Client) SecretRemove(ctx context.Context, id string) error { + resp, err := cli.delete(ctx, "/secrets/"+id, nil, nil) + ensureReaderClosed(resp) + return err +} diff --git a/vendor/github.com/docker/docker/client/service_list.go b/vendor/github.com/docker/docker/client/service_list.go index 4ebc9f301..c29e6d407 100644 --- a/vendor/github.com/docker/docker/client/service_list.go +++ b/vendor/github.com/docker/docker/client/service_list.go @@ -14,8 +14,8 @@ import ( func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { query := url.Values{} - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filter) + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/service_logs.go b/vendor/github.com/docker/docker/client/service_logs.go new file mode 100644 index 000000000..24384e3ec --- /dev/null +++ b/vendor/github.com/docker/docker/client/service_logs.go @@ -0,0 +1,52 @@ +package client + +import ( + "io" + "net/url" + "time" + + "golang.org/x/net/context" + + "github.com/docker/docker/api/types" + timetypes "github.com/docker/docker/api/types/time" +) + +// ServiceLogs returns the logs generated by a service in an io.ReadCloser. +// It's up to the caller to close the stream. +func (cli *Client) ServiceLogs(ctx context.Context, serviceID string, options types.ContainerLogsOptions) (io.ReadCloser, error) { + query := url.Values{} + if options.ShowStdout { + query.Set("stdout", "1") + } + + if options.ShowStderr { + query.Set("stderr", "1") + } + + if options.Since != "" { + ts, err := timetypes.GetTimestamp(options.Since, time.Now()) + if err != nil { + return nil, err + } + query.Set("since", ts) + } + + if options.Timestamps { + query.Set("timestamps", "1") + } + + if options.Details { + query.Set("details", "1") + } + + if options.Follow { + query.Set("follow", "1") + } + query.Set("tail", options.Tail) + + resp, err := cli.get(ctx, "/services/"+serviceID+"/logs", query, nil) + if err != nil { + return nil, err + } + return resp.body, nil +} diff --git a/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go new file mode 100644 index 000000000..be28d3262 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_get_unlock_key.go @@ -0,0 +1,21 @@ +package client + +import ( + "encoding/json" + + "github.com/docker/docker/api/types" + "golang.org/x/net/context" +) + +// SwarmGetUnlockKey retrieves the swarm's unlock key. +func (cli *Client) SwarmGetUnlockKey(ctx context.Context) (types.SwarmUnlockKeyResponse, error) { + serverResp, err := cli.get(ctx, "/swarm/unlockkey", nil, nil) + if err != nil { + return types.SwarmUnlockKeyResponse{}, err + } + + var response types.SwarmUnlockKeyResponse + err = json.NewDecoder(serverResp.body).Decode(&response) + ensureReaderClosed(serverResp) + return response, err +} diff --git a/vendor/github.com/docker/docker/client/swarm_unlock.go b/vendor/github.com/docker/docker/client/swarm_unlock.go new file mode 100644 index 000000000..addfb59f0 --- /dev/null +++ b/vendor/github.com/docker/docker/client/swarm_unlock.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/docker/docker/api/types/swarm" + "golang.org/x/net/context" +) + +// SwarmUnlock unlockes locked swarm. 
+func (cli *Client) SwarmUnlock(ctx context.Context, req swarm.UnlockRequest) error { + serverResp, err := cli.post(ctx, "/swarm/unlock", nil, req, nil) + if err != nil { + return err + } + + ensureReaderClosed(serverResp) + return err +} diff --git a/vendor/github.com/docker/docker/client/swarm_update.go b/vendor/github.com/docker/docker/client/swarm_update.go index f0be145ba..cc8eeb655 100644 --- a/vendor/github.com/docker/docker/client/swarm_update.go +++ b/vendor/github.com/docker/docker/client/swarm_update.go @@ -15,6 +15,7 @@ func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm query.Set("version", strconv.FormatUint(version.Index, 10)) query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken)) query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken)) + query.Set("rotateManagerUnlockKey", fmt.Sprintf("%v", flags.RotateManagerUnlockKey)) resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil) ensureReaderClosed(resp) return err diff --git a/vendor/github.com/docker/docker/client/task_list.go b/vendor/github.com/docker/docker/client/task_list.go index 07c8324c8..66324da95 100644 --- a/vendor/github.com/docker/docker/client/task_list.go +++ b/vendor/github.com/docker/docker/client/task_list.go @@ -14,8 +14,8 @@ import ( func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { query := url.Values{} - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filter) + if options.Filters.Len() > 0 { + filterJSON, err := filters.ToParam(options.Filters) if err != nil { return nil, err } diff --git a/vendor/github.com/docker/docker/client/utils.go b/vendor/github.com/docker/docker/client/utils.go new file mode 100644 index 000000000..03bf4c82f --- /dev/null +++ b/vendor/github.com/docker/docker/client/utils.go @@ -0,0 +1,15 @@ +package client + +import "regexp" + +var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) + +// getDockerOS returns the operating system based on the server header from the daemon. +func getDockerOS(serverHeader string) string { + var osType string + matches := headerRegexp.FindStringSubmatch(serverHeader) + if len(matches) > 0 { + osType = matches[1] + } + return osType +} diff --git a/vendor/github.com/docker/docker/client/volume_create.go b/vendor/github.com/docker/docker/client/volume_create.go index f3a79f1e1..9620c87cb 100644 --- a/vendor/github.com/docker/docker/client/volume_create.go +++ b/vendor/github.com/docker/docker/client/volume_create.go @@ -4,11 +4,12 @@ import ( "encoding/json" "github.com/docker/docker/api/types" + volumetypes "github.com/docker/docker/api/types/volume" "golang.org/x/net/context" ) // VolumeCreate creates a volume in the docker host. 
-func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { +func (cli *Client) VolumeCreate(ctx context.Context, options volumetypes.VolumesCreateBody) (types.Volume, error) { var volume types.Volume resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) if err != nil { diff --git a/vendor/github.com/docker/docker/client/volume_list.go b/vendor/github.com/docker/docker/client/volume_list.go index 44f03cfac..32247ce11 100644 --- a/vendor/github.com/docker/docker/client/volume_list.go +++ b/vendor/github.com/docker/docker/client/volume_list.go @@ -4,14 +4,14 @@ import ( "encoding/json" "net/url" - "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" + volumetypes "github.com/docker/docker/api/types/volume" "golang.org/x/net/context" ) // VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { - var volumes types.VolumesListResponse +func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (volumetypes.VolumesListOKBody, error) { + var volumes volumetypes.VolumesListOKBody query := url.Values{} if filter.Len() > 0 { diff --git a/vendor/github.com/docker/docker/client/volume_prune.go b/vendor/github.com/docker/docker/client/volume_prune.go index e7ea7b591..ea4e234a3 100644 --- a/vendor/github.com/docker/docker/client/volume_prune.go +++ b/vendor/github.com/docker/docker/client/volume_prune.go @@ -12,6 +12,10 @@ import ( func (cli *Client) VolumesPrune(ctx context.Context, cfg types.VolumesPruneConfig) (types.VolumesPruneReport, error) { var report types.VolumesPruneReport + if err := cli.NewVersionError("1.25", "volume prune"); err != nil { + return report, err + } + serverResp, err := cli.post(ctx, "/volumes/prune", nil, cfg, nil) if err != nil { return report, err diff --git a/vendor/github.com/docker/docker/client/volume_remove.go b/vendor/github.com/docker/docker/client/volume_remove.go index 3d5aeff25..6c26575b4 100644 --- a/vendor/github.com/docker/docker/client/volume_remove.go +++ b/vendor/github.com/docker/docker/client/volume_remove.go @@ -3,14 +3,17 @@ package client import ( "net/url" + "github.com/docker/docker/api/types/versions" "golang.org/x/net/context" ) // VolumeRemove removes a volume from the docker host. func (cli *Client) VolumeRemove(ctx context.Context, volumeID string, force bool) error { query := url.Values{} - if force { - query.Set("force", "1") + if versions.GreaterThanOrEqualTo(cli.version, "1.25") { + if force { + query.Set("force", "1") + } } resp, err := cli.delete(ctx, "/volumes/"+volumeID, query, nil) ensureReaderClosed(resp) diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go b/vendor/github.com/docker/docker/pkg/tlsconfig/config.go deleted file mode 100644 index 20604f8d1..000000000 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go +++ /dev/null @@ -1,132 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. 
-package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, -} - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration. -func ServerDefault() *tls.Config { - return &tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, - } -} - -// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration. -func ClientDefault() *tls.Config { - return &tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, - } -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - logrus.Debugf("Trusting %d certs", len(certPool.Subjects())) - return certPool, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault() - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - if options.CertFile != "" || options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. 
-func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault() - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - return tlsConfig, nil -} diff --git a/vendor/github.com/docker/docker/poule.yml b/vendor/github.com/docker/docker/poule.yml new file mode 100644 index 000000000..61aab4551 --- /dev/null +++ b/vendor/github.com/docker/docker/poule.yml @@ -0,0 +1,88 @@ +# Add a "status/0-triage" to every newly opened pull request. +- triggers: + pull_request: [ opened ] + operations: + - type: label + settings: { + patterns: { + status/0-triage: [ ".*" ], + } + } + +# For every newly created or modified issue, assign label based on matching regexp using the `label` +# operation, as well as an Engine-specific version label using `version-label`. +- triggers: + issues: [ edited, opened, reopened ] + operations: + - type: label + settings: { + patterns: { + area/builder: [ "dockerfile", "docker build" ], + area/distribution: [ "docker login", "docker logout", "docker pull", "docker push", "docker search" ], + area/plugins: [ "docker plugin" ], + area/networking: [ "docker network", "ipvs", "vxlan" ], + area/runtime: [ "oci runtime error" ], + area/security/trust: [ "docker_content_trust" ], + area/swarm: [ "docker node", "docker service", "docker swarm" ], + platform/desktop: [ "docker for mac", "docker for windows" ], + platform/freebsd: [ "freebsd" ], + platform/windows: [ "nanoserver", "windowsservercore", "windows server" ], + } + } + - type: version-label + +# When a pull request is closed, attach it to the currently active milestone. +- triggers: + pull_request: [ closed ] + operations: + - type: version-milestone + +# Labeling a PR with `rebuild/` triggers a rebuild job for the associated +# configuration. The label is automatically removed after the rebuild is initiated. There's no such +# thing as "templating" in this configuration, so we need one operation for each type of +# configuration that can be triggered. +- triggers: + pull_request: [ labeled ] + operations: + - type: rebuild + settings: { + # When configurations are empty, the `rebuild` operation rebuilds all the currently + # known statuses for that pull request. 
+ configurations: [], + label: "rebuild/*", + } + - type: rebuild + settings: { + configurations: [ arm ], + label: "rebuild/arm", + } + - type: rebuild + settings: { + configurations: [ experimental ], + label: "rebuild/experimental", + } + - type: rebuild + settings: { + configurations: [ janky ], + label: "rebuild/janky", + } + - type: rebuild + settings: { + configurations: [ userns ], + label: "rebuild/userns", + } + - type: rebuild + settings: { + configurations: [ vendor ], + label: "rebuild/vendor", + } + - type: rebuild + settings: { + configurations: [ win2lin ], + label: "rebuild/win2lin", + } + - type: rebuild + settings: { + configurations: [ windowsRS1 ], + label: "rebuild/windowsRS1", + } diff --git a/vendor/github.com/docker/docker/vendor.conf b/vendor/github.com/docker/docker/vendor.conf new file mode 100644 index 000000000..5ea45b32e --- /dev/null +++ b/vendor/github.com/docker/docker/vendor.conf @@ -0,0 +1,140 @@ +# the following lines are in sorted order, FYI +github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 +github.com/Microsoft/hcsshim v0.5.8 +github.com/Microsoft/go-winio v0.3.6 +github.com/Sirupsen/logrus f76d643702a30fbffecdfe50831e11881c96ceb3 https://github.com/aaronlehmann/logrus +github.com/davecgh/go-spew 6d212800a42e8ab5c146b8ace3490ee17e5225f9 +github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a +github.com/go-check/check 4ed411733c5785b40214c70bce814c3a3a689609 https://github.com/cpuguy83/check.git +github.com/gorilla/context v1.1 +github.com/gorilla/mux v1.1 +github.com/kr/pty 5cf931ef8f +github.com/mattn/go-shellwords v1.0.0 +github.com/mattn/go-sqlite3 v1.1.0 +github.com/tchap/go-patricia v2.2.6 +github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 +# forked golang.org/x/net package includes a patch for lazy loading trace templates +golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git +golang.org/x/sys 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9 +github.com/docker/go-units 8a7beacffa3009a9ac66bad506b18ffdd110cf97 +github.com/docker/go-connections f512407a188ecb16f31a33dbc9c4e4814afc1b03 + +github.com/RackSec/srslog 365bf33cd9acc21ae1c355209865f17228ca534e +github.com/imdario/mergo 0.2.1 + +#get libnetwork packages +github.com/docker/libnetwork 57be722e077059d1ee0539be31743a3642ccbeb3 +github.com/docker/go-events 18b43f1bc85d9cdd42c05a6cd2d444c7a200a894 +github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 +github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec +github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b +github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 +github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e +github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 +github.com/docker/libkv v0.2.1 +github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 +github.com/vishvananda/netlink e73bad418fd727ed3a02830b1af1ad0283a1de6c +github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 +github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 +github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d +github.com/coreos/etcd 3a49cbb769ebd8d1dd25abb1e83386e9883a5707 +github.com/ugorji/go 
f1f1a805ed361a0e078bb537e4ea78cd37dcf065 +github.com/hashicorp/consul v0.5.2 +github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 +github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 + +# get graph and distribution packages +github.com/docker/distribution 8016d2d8903e378edacac11e4d809efbc987ad61 +github.com/vbatts/tar-split v0.10.1 + +# get go-zfs packages +github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa +github.com/pborman/uuid v1.0 + +# get desired notary commit, might also need to be updated in Dockerfile +github.com/docker/notary v0.4.2 + +google.golang.org/grpc v1.0.2 +github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f +github.com/docker/go v1.5.1-1-1-gbaf439e +github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c + +github.com/opencontainers/runc ac031b5bf1cc92239461125f4c1ffb760522bbf2 # libcontainer +github.com/opencontainers/runtime-spec 1c7c27d043c2a5e513a44084d2b10d77d1402b8c # specs +github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 +# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) +github.com/coreos/go-systemd v4 +github.com/godbus/dbus v4.0.0 +github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 +github.com/golang/protobuf 1f49d83d9aa00e6ce4fc8258c71cc7786aec968a + +# gelf logging driver deps +github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 + +github.com/fluent/fluent-logger-golang v1.2.1 +# fluent-logger-golang deps +github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa +github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c + +# fsnotify +github.com/fsnotify/fsnotify v1.2.11 + +# awslogs deps +github.com/aws/aws-sdk-go v1.4.22 +github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 +github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 + +# logentries +github.com/bsphere/le_go d3308aafe090956bc89a65f0769f58251a1b4f03 + +# gcplogs deps +golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be +google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 +google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 + +# native credentials +github.com/docker/docker-credential-helpers f72c04f1d8e71959a6d103f808c50ccbad79b9fd + +# containerd +github.com/docker/containerd 8517738ba4b82aff5662c97ca4627e7e4d03b531 +github.com/tonistiigi/fifo 1405643975692217d6720f8b54aeee1bf2cd5cf4 + +# cluster +github.com/docker/swarmkit efd44df04cc0fd828de5947263858c3a5a2729b1 +github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 +github.com/gogo/protobuf v0.3 +github.com/cloudflare/cfssl 7fb22c8cba7ecaf98e4082d22d65800cf45e042a +github.com/google/certificate-transparency d90e65c3a07988180c5b1ece71791c0b6506826e +golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 +github.com/hashicorp/go-memdb 608dda3b1410a73eaf3ac8b517c9ae7ebab6aa87 https://github.com/floridoo/go-memdb +github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 +github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 +github.com/coreos/pkg fa29b1d70f0beaddd4c7021607cc3c3be8ce94b8 
+github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 +github.com/prometheus/client_golang 52437c81da6b127a9925d17eb3a382a2e5fd395e +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 +github.com/prometheus/common ebdfc6da46522d58825777cf1f90490a5b1ef1d8 +github.com/prometheus/procfs abf152e5f3e97f2fafac028d2cc06c1feb87ffa5 +bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 +github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a +github.com/pkg/errors 839d9e913e063e28dfd0e6c7b7512793e0a48be9 + +# cli +github.com/spf13/cobra v1.5 https://github.com/dnephin/cobra.git +github.com/spf13/pflag dabebe21bf790f782ea4c7bbd2efc430de182afd +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff + +# metrics +github.com/docker/go-metrics 86138d05f285fd9737a99bee2d9be30866b59d72 + +# composefile +github.com/aanand/compose-file 8cff34df885ef07824138236bc4d27d359888b17 +github.com/mitchellh/mapstructure f3009df150dadf309fdee4a54ed65c124afad715 +github.com/xeipuuv/gojsonpointer e0fe6f68307607d540ed8eac07a342c33fa1b54a +github.com/xeipuuv/gojsonreference e02fc20de94c78484cd5ffb007f8af96be030a45 +github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d +gopkg.in/yaml.v2 a83829b6f1293c91addabc89d0571c246397bbf4 diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go index e19c73c37..4d5f5ae63 100644 --- a/vendor/github.com/docker/go-connections/nat/nat.go +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -155,33 +155,36 @@ type PortMapping struct { Binding PortBinding } +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + // ParsePortSpec parses a port specification string into a slice of PortMappings func ParsePortSpec(rawPort string) ([]PortMapping, error) { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = fmt.Sprintf(":%s", rawPort) - } + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) - parts, err := PartParser(portSpecTemplate, rawPort) + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") if err != nil { - return nil, err + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) } - - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", rawIP) + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) } if containerPort == "" { return nil, fmt.Errorf("No port specified: %s", rawPort) @@ -230,7 +233,7 
@@ func ParsePortSpec(rawPort string) ([]PortMapping, error) { } binding := PortBinding{ - HostIP: rawIP, + HostIP: ip, HostPort: hostPort, } ports = append(ports, PortMapping{Port: port, Binding: binding}) diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go index 872050205..892adf8c6 100644 --- a/vendor/github.com/docker/go-connections/nat/parse.go +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -8,6 +8,7 @@ import ( // PartParser parses and validates the specified string (data) using the specified template // e.g. ip:public:private -> 192.168.0.1:80:8000 +// DEPRECATED: do not use, this function may be removed in a future version func PartParser(template, data string) (map[string]string, error) { // ip:public:private var ( diff --git a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go index 8a82727df..53cbb6c79 100644 --- a/vendor/github.com/docker/go-connections/sockets/tcp_socket.go +++ b/vendor/github.com/docker/go-connections/sockets/tcp_socket.go @@ -7,7 +7,7 @@ import ( ) // NewTCPSocket creates a TCP socket listener with the specified address and -// and the specified tls configuration. If TLSConfig is set, will encapsulate the +// the specified tls configuration. If TLSConfig is set, will encapsulate the // TCP listener inside a TLS one. func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { l, err := net.Listen("tcp", addr) diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go new file mode 100644 index 000000000..352d342a8 --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go @@ -0,0 +1,21 @@ +// +build go1.7 + +package tlsconfig + +import ( + "crypto/x509" + "runtime" + + "github.com/Sirupsen/logrus" +) + +// SystemCertPool returns a copy of the system cert pool, +// returns an error if failed to load or empty pool on windows. 
+func SystemCertPool() (*x509.CertPool, error) { + certpool, err := x509.SystemCertPool() + if err != nil && runtime.GOOS == "windows" { + logrus.Warnf("Unable to use system certificate pool: %v", err) + return x509.NewCertPool(), nil + } + return certpool, err +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go new file mode 100644 index 000000000..262c95e8c --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go @@ -0,0 +1,16 @@ +// +build !go1.7 + +package tlsconfig + +import ( + "crypto/x509" + + "github.com/Sirupsen/logrus" +) + +// SystemCertPool returns an new empty cert pool, +// accessing system cert pool is supported in go 1.7 +func SystemCertPool() (*x509.CertPool, error) { + logrus.Warn("Unable to use system certificate pool: requires building with go 1.7 or later") + return x509.NewCertPool(), nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go index 2cd50a62c..8bbffcfd3 100644 --- a/vendor/github.com/docker/go-connections/tlsconfig/config.go +++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go @@ -68,10 +68,13 @@ func ClientDefault() *tls.Config { // certPool returns an X.509 certificate pool from `caFile`, the certificate file. func certPool(caFile string) (*x509.CertPool, error) { // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() + certPool, err := SystemCertPool() + if err != nil { + return nil, fmt.Errorf("failed to read system certificates: %v", err) + } pem, err := ioutil.ReadFile(caFile) if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) + return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err) } if !certPool.AppendCertsFromPEM(pem) { return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go index d4a645490..544b5dd3e 100644 --- a/vendor/github.com/docker/go-units/duration.go +++ b/vendor/github.com/docker/go-units/duration.go @@ -26,7 +26,7 @@ func HumanDuration(d time.Duration) string { return fmt.Sprintf("%d hours", hours) } else if hours < 24*7*2 { return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { + } else if hours < 24*30*2 { return fmt.Sprintf("%d weeks", hours/24/7) } else if hours < 24*365*2 { return fmt.Sprintf("%d months", hours/24/30) diff --git a/vendor/github.com/gorilla/handlers/.travis.yml b/vendor/github.com/gorilla/handlers/.travis.yml new file mode 100644 index 000000000..4ea1e7a1f --- /dev/null +++ b/vendor/github.com/gorilla/handlers/.travis.yml @@ -0,0 +1,18 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/github.com/gorilla/handlers/LICENSE b/vendor/github.com/gorilla/handlers/LICENSE new file mode 100644 index 000000000..66ea3c8ae --- /dev/null +++ b/vendor/github.com/gorilla/handlers/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 The Gorilla Handlers Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/handlers/README.md b/vendor/github.com/gorilla/handlers/README.md new file mode 100644 index 000000000..a782c4152 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/README.md @@ -0,0 +1,53 @@ +gorilla/handlers +================ +[![GoDoc](https://godoc.org/github.com/gorilla/handlers?status.svg)](https://godoc.org/github.com/gorilla/handlers) [![Build Status](https://travis-ci.org/gorilla/handlers.svg?branch=master)](https://travis-ci.org/gorilla/handlers) + +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's `net/http` package (or any framework supporting `http.Handler`), including: + +* [**LoggingHandler**](https://godoc.org/github.com/gorilla/handlers#LoggingHandler) for logging HTTP requests in the Apache [Common Log + Format](http://httpd.apache.org/docs/2.2/logs.html#common). +* [**CombinedLoggingHandler**](https://godoc.org/github.com/gorilla/handlers#CombinedLoggingHandler) for logging HTTP requests in the Apache [Combined Log + Format](http://httpd.apache.org/docs/2.2/logs.html#combined) commonly used by + both Apache and nginx. +* [**CompressHandler**](https://godoc.org/github.com/gorilla/handlers#CompressHandler) for gzipping responses. +* [**ContentTypeHandler**](https://godoc.org/github.com/gorilla/handlers#ContentTypeHandler) for validating requests against a list of accepted + content types. +* [**MethodHandler**](https://godoc.org/github.com/gorilla/handlers#MethodHandler) for matching HTTP methods against handlers in a + `map[string]http.Handler` +* [**ProxyHeaders**](https://godoc.org/github.com/gorilla/handlers#ProxyHeaders) for populating `r.RemoteAddr` and `r.URL.Scheme` based on the + `X-Forwarded-For`, `X-Real-IP`, `X-Forwarded-Proto` and RFC7239 `Forwarded` + headers when running a Go server behind a HTTP reverse proxy. +* [**CanonicalHost**](https://godoc.org/github.com/gorilla/handlers#CanonicalHost) for re-directing to the preferred host when handling multiple + domains (i.e. multiple CNAME aliases). +* [**RecoveryHandler**](https://godoc.org/github.com/gorilla/handlers#RecoveryHandler) for recovering from unexpected panics. 
+ +Other handlers are documented [on the Gorilla +website](http://www.gorillatoolkit.org/pkg/handlers). + +## Example + +A simple example using `handlers.LoggingHandler` and `handlers.CompressHandler`: + +```go +import ( + "net/http" + "github.com/gorilla/handlers" +) + +func main() { + r := http.NewServeMux() + + // Only log requests to our admin dashboard to stdout + r.Handle("/admin", handlers.LoggingHandler(os.Stdout, http.HandlerFunc(ShowAdminDashboard))) + r.HandleFunc("/", ShowIndex) + + // Wrap our server with our gzip handler to gzip compress all responses. + http.ListenAndServe(":8000", handlers.CompressHandler(r)) +} +``` + +## License + +BSD licensed. See the included LICENSE file for details. + diff --git a/vendor/github.com/gorilla/handlers/canonical.go b/vendor/github.com/gorilla/handlers/canonical.go new file mode 100644 index 000000000..8437fefc1 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/canonical.go @@ -0,0 +1,74 @@ +package handlers + +import ( + "net/http" + "net/url" + "strings" +) + +type canonical struct { + h http.Handler + domain string + code int +} + +// CanonicalHost is HTTP middleware that re-directs requests to the canonical +// domain. It accepts a domain and a status code (e.g. 301 or 302) and +// re-directs clients to this domain. The existing request path is maintained. +// +// Note: If the provided domain is considered invalid by url.Parse or otherwise +// returns an empty scheme or host, clients are not re-directed. +// +// Example: +// +// r := mux.NewRouter() +// canonical := handlers.CanonicalHost("http://www.gorillatoolkit.org", 302) +// r.HandleFunc("/route", YourHandler) +// +// log.Fatal(http.ListenAndServe(":7000", canonical(r))) +// +func CanonicalHost(domain string, code int) func(h http.Handler) http.Handler { + fn := func(h http.Handler) http.Handler { + return canonical{h, domain, code} + } + + return fn +} + +func (c canonical) ServeHTTP(w http.ResponseWriter, r *http.Request) { + dest, err := url.Parse(c.domain) + if err != nil { + // Call the next handler if the provided domain fails to parse. + c.h.ServeHTTP(w, r) + return + } + + if dest.Scheme == "" || dest.Host == "" { + // Call the next handler if the scheme or host are empty. + // Note that url.Parse won't fail on in this case. + c.h.ServeHTTP(w, r) + return + } + + if !strings.EqualFold(cleanHost(r.Host), dest.Host) { + // Re-build the destination URL + dest := dest.Scheme + "://" + dest.Host + r.URL.Path + if r.URL.RawQuery != "" { + dest += "?" + r.URL.RawQuery + } + http.Redirect(w, r, dest, c.code) + return + } + + c.h.ServeHTTP(w, r) +} + +// cleanHost cleans invalid Host headers by stripping anything after '/' or ' '. +// This is backported from Go 1.5 (in response to issue #11206) and attempts to +// mitigate malformed Host headers that do not match the format in RFC7230. +func cleanHost(in string) string { + if i := strings.IndexAny(in, " /"); i != -1 { + return in[:i] + } + return in +} diff --git a/vendor/github.com/gorilla/handlers/compress.go b/vendor/github.com/gorilla/handlers/compress.go new file mode 100644 index 000000000..e8345d792 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/compress.go @@ -0,0 +1,148 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package handlers + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" +) + +type compressResponseWriter struct { + io.Writer + http.ResponseWriter + http.Hijacker + http.Flusher + http.CloseNotifier +} + +func (w *compressResponseWriter) WriteHeader(c int) { + w.ResponseWriter.Header().Del("Content-Length") + w.ResponseWriter.WriteHeader(c) +} + +func (w *compressResponseWriter) Header() http.Header { + return w.ResponseWriter.Header() +} + +func (w *compressResponseWriter) Write(b []byte) (int, error) { + h := w.ResponseWriter.Header() + if h.Get("Content-Type") == "" { + h.Set("Content-Type", http.DetectContentType(b)) + } + h.Del("Content-Length") + + return w.Writer.Write(b) +} + +type flusher interface { + Flush() error +} + +func (w *compressResponseWriter) Flush() { + // Flush compressed data if compressor supports it. + if f, ok := w.Writer.(flusher); ok { + f.Flush() + } + // Flush HTTP response. + if w.Flusher != nil { + w.Flusher.Flush() + } +} + +// CompressHandler gzip compresses HTTP responses for clients that support it +// via the 'Accept-Encoding' header. +// +// Compressing TLS traffic may leak the page contents to an attacker if the +// page contains user input: http://security.stackexchange.com/a/102015/12208 +func CompressHandler(h http.Handler) http.Handler { + return CompressHandlerLevel(h, gzip.DefaultCompression) +} + +// CompressHandlerLevel gzip compresses HTTP responses with specified compression level +// for clients that support it via the 'Accept-Encoding' header. +// +// The compression level should be gzip.DefaultCompression, gzip.NoCompression, +// or any integer value between gzip.BestSpeed and gzip.BestCompression inclusive. +// gzip.DefaultCompression is used in case of invalid compression level. +func CompressHandlerLevel(h http.Handler, level int) http.Handler { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + level = gzip.DefaultCompression + } + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + L: + for _, enc := range strings.Split(r.Header.Get("Accept-Encoding"), ",") { + switch strings.TrimSpace(enc) { + case "gzip": + w.Header().Set("Content-Encoding", "gzip") + w.Header().Add("Vary", "Accept-Encoding") + + gw, _ := gzip.NewWriterLevel(w, level) + defer gw.Close() + + h, hok := w.(http.Hijacker) + if !hok { /* w is not Hijacker... oh well... */ + h = nil + } + + f, fok := w.(http.Flusher) + if !fok { + f = nil + } + + cn, cnok := w.(http.CloseNotifier) + if !cnok { + cn = nil + } + + w = &compressResponseWriter{ + Writer: gw, + ResponseWriter: w, + Hijacker: h, + Flusher: f, + CloseNotifier: cn, + } + + break L + case "deflate": + w.Header().Set("Content-Encoding", "deflate") + w.Header().Add("Vary", "Accept-Encoding") + + fw, _ := flate.NewWriter(w, level) + defer fw.Close() + + h, hok := w.(http.Hijacker) + if !hok { /* w is not Hijacker... oh well... 
*/ + h = nil + } + + f, fok := w.(http.Flusher) + if !fok { + f = nil + } + + cn, cnok := w.(http.CloseNotifier) + if !cnok { + cn = nil + } + + w = &compressResponseWriter{ + Writer: fw, + ResponseWriter: w, + Hijacker: h, + Flusher: f, + CloseNotifier: cn, + } + + break L + } + } + + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/handlers/cors.go b/vendor/github.com/gorilla/handlers/cors.go new file mode 100644 index 000000000..1f92d1ad4 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/cors.go @@ -0,0 +1,317 @@ +package handlers + +import ( + "net/http" + "strconv" + "strings" +) + +// CORSOption represents a functional option for configuring the CORS middleware. +type CORSOption func(*cors) error + +type cors struct { + h http.Handler + allowedHeaders []string + allowedMethods []string + allowedOrigins []string + allowedOriginValidator OriginValidator + exposedHeaders []string + maxAge int + ignoreOptions bool + allowCredentials bool +} + +// OriginValidator takes an origin string and returns whether or not that origin is allowed. +type OriginValidator func(string) bool + +var ( + defaultCorsMethods = []string{"GET", "HEAD", "POST"} + defaultCorsHeaders = []string{"Accept", "Accept-Language", "Content-Language", "Origin"} + // (WebKit/Safari v9 sends the Origin header by default in AJAX requests) +) + +const ( + corsOptionMethod string = "OPTIONS" + corsAllowOriginHeader string = "Access-Control-Allow-Origin" + corsExposeHeadersHeader string = "Access-Control-Expose-Headers" + corsMaxAgeHeader string = "Access-Control-Max-Age" + corsAllowMethodsHeader string = "Access-Control-Allow-Methods" + corsAllowHeadersHeader string = "Access-Control-Allow-Headers" + corsAllowCredentialsHeader string = "Access-Control-Allow-Credentials" + corsRequestMethodHeader string = "Access-Control-Request-Method" + corsRequestHeadersHeader string = "Access-Control-Request-Headers" + corsOriginHeader string = "Origin" + corsVaryHeader string = "Vary" + corsOriginMatchAll string = "*" +) + +func (ch *cors) ServeHTTP(w http.ResponseWriter, r *http.Request) { + origin := r.Header.Get(corsOriginHeader) + if !ch.isOriginAllowed(origin) { + ch.h.ServeHTTP(w, r) + return + } + + if r.Method == corsOptionMethod { + if ch.ignoreOptions { + ch.h.ServeHTTP(w, r) + return + } + + if _, ok := r.Header[corsRequestMethodHeader]; !ok { + w.WriteHeader(http.StatusBadRequest) + return + } + + method := r.Header.Get(corsRequestMethodHeader) + if !ch.isMatch(method, ch.allowedMethods) { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + requestHeaders := strings.Split(r.Header.Get(corsRequestHeadersHeader), ",") + allowedHeaders := []string{} + for _, v := range requestHeaders { + canonicalHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if canonicalHeader == "" || ch.isMatch(canonicalHeader, defaultCorsHeaders) { + continue + } + + if !ch.isMatch(canonicalHeader, ch.allowedHeaders) { + w.WriteHeader(http.StatusForbidden) + return + } + + allowedHeaders = append(allowedHeaders, canonicalHeader) + } + + if len(allowedHeaders) > 0 { + w.Header().Set(corsAllowHeadersHeader, strings.Join(allowedHeaders, ",")) + } + + if ch.maxAge > 0 { + w.Header().Set(corsMaxAgeHeader, strconv.Itoa(ch.maxAge)) + } + + if !ch.isMatch(method, defaultCorsMethods) { + w.Header().Set(corsAllowMethodsHeader, method) + } + } else { + if len(ch.exposedHeaders) > 0 { + w.Header().Set(corsExposeHeadersHeader, strings.Join(ch.exposedHeaders, ",")) + } + } + + if ch.allowCredentials { + 
w.Header().Set(corsAllowCredentialsHeader, "true") + } + + if len(ch.allowedOrigins) > 1 { + w.Header().Set(corsVaryHeader, corsOriginHeader) + } + + w.Header().Set(corsAllowOriginHeader, origin) + + if r.Method == corsOptionMethod { + return + } + ch.h.ServeHTTP(w, r) +} + +// CORS provides Cross-Origin Resource Sharing middleware. +// Example: +// +// import ( +// "net/http" +// +// "github.com/gorilla/handlers" +// "github.com/gorilla/mux" +// ) +// +// func main() { +// r := mux.NewRouter() +// r.HandleFunc("/users", UserEndpoint) +// r.HandleFunc("/projects", ProjectEndpoint) +// +// // Apply the CORS middleware to our top-level router, with the defaults. +// http.ListenAndServe(":8000", handlers.CORS()(r)) +// } +// +func CORS(opts ...CORSOption) func(http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + ch := parseCORSOptions(opts...) + ch.h = h + return ch + } +} + +func parseCORSOptions(opts ...CORSOption) *cors { + ch := &cors{ + allowedMethods: defaultCorsMethods, + allowedHeaders: defaultCorsHeaders, + allowedOrigins: []string{corsOriginMatchAll}, + } + + for _, option := range opts { + option(ch) + } + + return ch +} + +// +// Functional options for configuring CORS. +// + +// AllowedHeaders adds the provided headers to the list of allowed headers in a +// CORS request. +// This is an append operation so the headers Accept, Accept-Language, +// and Content-Language are always allowed. +// Content-Type must be explicitly declared if accepting Content-Types other than +// application/x-www-form-urlencoded, multipart/form-data, or text/plain. +func AllowedHeaders(headers []string) CORSOption { + return func(ch *cors) error { + for _, v := range headers { + normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if normalizedHeader == "" { + continue + } + + if !ch.isMatch(normalizedHeader, ch.allowedHeaders) { + ch.allowedHeaders = append(ch.allowedHeaders, normalizedHeader) + } + } + + return nil + } +} + +// AllowedMethods can be used to explicitly allow methods in the +// Access-Control-Allow-Methods header. +// This is a replacement operation so you must also +// pass GET, HEAD, and POST if you wish to support those methods. +func AllowedMethods(methods []string) CORSOption { + return func(ch *cors) error { + ch.allowedMethods = []string{} + for _, v := range methods { + normalizedMethod := strings.ToUpper(strings.TrimSpace(v)) + if normalizedMethod == "" { + continue + } + + if !ch.isMatch(normalizedMethod, ch.allowedMethods) { + ch.allowedMethods = append(ch.allowedMethods, normalizedMethod) + } + } + + return nil + } +} + +// AllowedOrigins sets the allowed origins for CORS requests, as used in the +// 'Allow-Access-Control-Origin' HTTP header. +// Note: Passing in a []string{"*"} will allow any domain. +func AllowedOrigins(origins []string) CORSOption { + return func(ch *cors) error { + for _, v := range origins { + if v == corsOriginMatchAll { + ch.allowedOrigins = []string{corsOriginMatchAll} + return nil + } + } + + ch.allowedOrigins = origins + return nil + } +} + +// AllowedOriginValidator sets a function for evaluating allowed origins in CORS requests, represented by the +// 'Allow-Access-Control-Origin' HTTP header. +func AllowedOriginValidator(fn OriginValidator) CORSOption { + return func(ch *cors) error { + ch.allowedOriginValidator = fn + return nil + } +} + +// ExposeHeaders can be used to specify headers that are available +// and will not be stripped out by the user-agent. 
+func ExposedHeaders(headers []string) CORSOption { + return func(ch *cors) error { + ch.exposedHeaders = []string{} + for _, v := range headers { + normalizedHeader := http.CanonicalHeaderKey(strings.TrimSpace(v)) + if normalizedHeader == "" { + continue + } + + if !ch.isMatch(normalizedHeader, ch.exposedHeaders) { + ch.exposedHeaders = append(ch.exposedHeaders, normalizedHeader) + } + } + + return nil + } +} + +// MaxAge determines the maximum age (in seconds) between preflight requests. A +// maximum of 10 minutes is allowed. An age above this value will default to 10 +// minutes. +func MaxAge(age int) CORSOption { + return func(ch *cors) error { + // Maximum of 10 minutes. + if age > 600 { + age = 600 + } + + ch.maxAge = age + return nil + } +} + +// IgnoreOptions causes the CORS middleware to ignore OPTIONS requests, instead +// passing them through to the next handler. This is useful when your application +// or framework has a pre-existing mechanism for responding to OPTIONS requests. +func IgnoreOptions() CORSOption { + return func(ch *cors) error { + ch.ignoreOptions = true + return nil + } +} + +// AllowCredentials can be used to specify that the user agent may pass +// authentication details along with the request. +func AllowCredentials() CORSOption { + return func(ch *cors) error { + ch.allowCredentials = true + return nil + } +} + +func (ch *cors) isOriginAllowed(origin string) bool { + if origin == "" { + return false + } + + if ch.allowedOriginValidator != nil { + return ch.allowedOriginValidator(origin) + } + + for _, allowedOrigin := range ch.allowedOrigins { + if allowedOrigin == origin || allowedOrigin == corsOriginMatchAll { + return true + } + } + + return false +} + +func (ch *cors) isMatch(needle string, haystack []string) bool { + for _, v := range haystack { + if v == needle { + return true + } + } + + return false +} diff --git a/vendor/github.com/gorilla/handlers/doc.go b/vendor/github.com/gorilla/handlers/doc.go new file mode 100644 index 000000000..944e5a8ae --- /dev/null +++ b/vendor/github.com/gorilla/handlers/doc.go @@ -0,0 +1,9 @@ +/* +Package handlers is a collection of handlers (aka "HTTP middleware") for use +with Go's net/http package (or any framework supporting http.Handler). + +The package includes handlers for logging in standardised formats, compressing +HTTP responses, validating content types and other useful tools for manipulating +requests and responses. +*/ +package handlers diff --git a/vendor/github.com/gorilla/handlers/handlers.go b/vendor/github.com/gorilla/handlers/handlers.go new file mode 100644 index 000000000..9544d2f0a --- /dev/null +++ b/vendor/github.com/gorilla/handlers/handlers.go @@ -0,0 +1,403 @@ +// Copyright 2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package handlers + +import ( + "bufio" + "fmt" + "io" + "net" + "net/http" + "net/url" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// MethodHandler is an http.Handler that dispatches to a handler whose key in the +// MethodHandler's map matches the name of the HTTP request's method, eg: GET +// +// If the request's method is OPTIONS and OPTIONS is not a key in the map then +// the handler responds with a status of 200 and sets the Allow header to a +// comma-separated list of available methods. 
+// +// If the request's method doesn't match any of its keys the handler responds +// with a status of HTTP 405 "Method Not Allowed" and sets the Allow header to a +// comma-separated list of available methods. +type MethodHandler map[string]http.Handler + +func (h MethodHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if handler, ok := h[req.Method]; ok { + handler.ServeHTTP(w, req) + } else { + allow := []string{} + for k := range h { + allow = append(allow, k) + } + sort.Strings(allow) + w.Header().Set("Allow", strings.Join(allow, ", ")) + if req.Method == "OPTIONS" { + w.WriteHeader(http.StatusOK) + } else { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + } +} + +// loggingHandler is the http.Handler implementation for LoggingHandlerTo and its +// friends +type loggingHandler struct { + writer io.Writer + handler http.Handler +} + +// combinedLoggingHandler is the http.Handler implementation for LoggingHandlerTo +// and its friends +type combinedLoggingHandler struct { + writer io.Writer + handler http.Handler +} + +func (h loggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger := makeLogger(w) + url := *req.URL + h.handler.ServeHTTP(logger, req) + writeLog(h.writer, req, url, t, logger.Status(), logger.Size()) +} + +func (h combinedLoggingHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + t := time.Now() + logger := makeLogger(w) + url := *req.URL + h.handler.ServeHTTP(logger, req) + writeCombinedLog(h.writer, req, url, t, logger.Status(), logger.Size()) +} + +func makeLogger(w http.ResponseWriter) loggingResponseWriter { + var logger loggingResponseWriter = &responseLogger{w: w} + if _, ok := w.(http.Hijacker); ok { + logger = &hijackLogger{responseLogger{w: w}} + } + h, ok1 := logger.(http.Hijacker) + c, ok2 := w.(http.CloseNotifier) + if ok1 && ok2 { + return hijackCloseNotifier{logger, h, c} + } + if ok2 { + return &closeNotifyWriter{logger, c} + } + return logger +} + +type loggingResponseWriter interface { + http.ResponseWriter + http.Flusher + Status() int + Size() int +} + +// responseLogger is wrapper of http.ResponseWriter that keeps track of its HTTP +// status code and body size +type responseLogger struct { + w http.ResponseWriter + status int + size int +} + +func (l *responseLogger) Header() http.Header { + return l.w.Header() +} + +func (l *responseLogger) Write(b []byte) (int, error) { + if l.status == 0 { + // The status will be StatusOK if WriteHeader has not been called yet + l.status = http.StatusOK + } + size, err := l.w.Write(b) + l.size += size + return size, err +} + +func (l *responseLogger) WriteHeader(s int) { + l.w.WriteHeader(s) + l.status = s +} + +func (l *responseLogger) Status() int { + return l.status +} + +func (l *responseLogger) Size() int { + return l.size +} + +func (l *responseLogger) Flush() { + f, ok := l.w.(http.Flusher) + if ok { + f.Flush() + } +} + +type hijackLogger struct { + responseLogger +} + +func (l *hijackLogger) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h := l.responseLogger.w.(http.Hijacker) + conn, rw, err := h.Hijack() + if err == nil && l.responseLogger.status == 0 { + // The status will be StatusSwitchingProtocols if there was no error and + // WriteHeader has not been called yet + l.responseLogger.status = http.StatusSwitchingProtocols + } + return conn, rw, err +} + +type closeNotifyWriter struct { + loggingResponseWriter + http.CloseNotifier +} + +type hijackCloseNotifier struct { + loggingResponseWriter + 
http.Hijacker + http.CloseNotifier +} + +const lowerhex = "0123456789abcdef" + +func appendQuoted(buf []byte, s string) []byte { + var runeTmp [utf8.UTFMax]byte + for width := 0; len(s) > 0; s = s[width:] { + r := rune(s[0]) + width = 1 + if r >= utf8.RuneSelf { + r, width = utf8.DecodeRuneInString(s) + } + if width == 1 && r == utf8.RuneError { + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + continue + } + if r == rune('"') || r == '\\' { // always backslashed + buf = append(buf, '\\') + buf = append(buf, byte(r)) + continue + } + if strconv.IsPrint(r) { + n := utf8.EncodeRune(runeTmp[:], r) + buf = append(buf, runeTmp[:n]...) + continue + } + switch r { + case '\a': + buf = append(buf, `\a`...) + case '\b': + buf = append(buf, `\b`...) + case '\f': + buf = append(buf, `\f`...) + case '\n': + buf = append(buf, `\n`...) + case '\r': + buf = append(buf, `\r`...) + case '\t': + buf = append(buf, `\t`...) + case '\v': + buf = append(buf, `\v`...) + default: + switch { + case r < ' ': + buf = append(buf, `\x`...) + buf = append(buf, lowerhex[s[0]>>4]) + buf = append(buf, lowerhex[s[0]&0xF]) + case r > utf8.MaxRune: + r = 0xFFFD + fallthrough + case r < 0x10000: + buf = append(buf, `\u`...) + for s := 12; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + default: + buf = append(buf, `\U`...) + for s := 28; s >= 0; s -= 4 { + buf = append(buf, lowerhex[r>>uint(s)&0xF]) + } + } + } + } + return buf + +} + +// buildCommonLogLine builds a log entry for req in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func buildCommonLogLine(req *http.Request, url url.URL, ts time.Time, status int, size int) []byte { + username := "-" + if url.User != nil { + if name := url.User.Username(); name != "" { + username = name + } + } + + host, _, err := net.SplitHostPort(req.RemoteAddr) + + if err != nil { + host = req.RemoteAddr + } + + uri := req.RequestURI + + // Requests using the CONNECT method over HTTP/2.0 must use + // the authority field (aka r.Host) to identify the target. + // Refer: https://httpwg.github.io/specs/rfc7540.html#CONNECT + if req.ProtoMajor == 2 && req.Method == "CONNECT" { + uri = req.Host + } + if uri == "" { + uri = url.RequestURI() + } + + buf := make([]byte, 0, 3*(len(host)+len(username)+len(req.Method)+len(uri)+len(req.Proto)+50)/2) + buf = append(buf, host...) + buf = append(buf, " - "...) + buf = append(buf, username...) + buf = append(buf, " ["...) + buf = append(buf, ts.Format("02/Jan/2006:15:04:05 -0700")...) + buf = append(buf, `] "`...) + buf = append(buf, req.Method...) + buf = append(buf, " "...) + buf = appendQuoted(buf, uri) + buf = append(buf, " "...) + buf = append(buf, req.Proto...) + buf = append(buf, `" `...) + buf = append(buf, strconv.Itoa(status)...) + buf = append(buf, " "...) + buf = append(buf, strconv.Itoa(size)...) + return buf +} + +// writeLog writes a log entry for req to w in Apache Common Log Format. +// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { + buf := buildCommonLogLine(req, url, ts, status, size) + buf = append(buf, '\n') + w.Write(buf) +} + +// writeCombinedLog writes a log entry for req to w in Apache Combined Log Format. 
+// ts is the timestamp with which the entry should be logged. +// status and size are used to provide the response HTTP status and size. +func writeCombinedLog(w io.Writer, req *http.Request, url url.URL, ts time.Time, status, size int) { + buf := buildCommonLogLine(req, url, ts, status, size) + buf = append(buf, ` "`...) + buf = appendQuoted(buf, req.Referer()) + buf = append(buf, `" "`...) + buf = appendQuoted(buf, req.UserAgent()) + buf = append(buf, '"', '\n') + w.Write(buf) +} + +// CombinedLoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Combined Log Format. +// +// See http://httpd.apache.org/docs/2.2/logs.html#combined for a description of this format. +// +// LoggingHandler always sets the ident field of the log to - +func CombinedLoggingHandler(out io.Writer, h http.Handler) http.Handler { + return combinedLoggingHandler{out, h} +} + +// LoggingHandler return a http.Handler that wraps h and logs requests to out in +// Apache Common Log Format (CLF). +// +// See http://httpd.apache.org/docs/2.2/logs.html#common for a description of this format. +// +// LoggingHandler always sets the ident field of the log to - +// +// Example: +// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// w.Write([]byte("This is a catch-all route")) +// }) +// loggedRouter := handlers.LoggingHandler(os.Stdout, r) +// http.ListenAndServe(":1123", loggedRouter) +// +func LoggingHandler(out io.Writer, h http.Handler) http.Handler { + return loggingHandler{out, h} +} + +// isContentType validates the Content-Type header matches the supplied +// contentType. That is, its type and subtype match. +func isContentType(h http.Header, contentType string) bool { + ct := h.Get("Content-Type") + if i := strings.IndexRune(ct, ';'); i != -1 { + ct = ct[0:i] + } + return ct == contentType +} + +// ContentTypeHandler wraps and returns a http.Handler, validating the request +// content type is compatible with the contentTypes list. It writes a HTTP 415 +// error if that fails. +// +// Only PUT, POST, and PATCH requests are considered. +func ContentTypeHandler(h http.Handler, contentTypes ...string) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !(r.Method == "PUT" || r.Method == "POST" || r.Method == "PATCH") { + h.ServeHTTP(w, r) + return + } + + for _, ct := range contentTypes { + if isContentType(r.Header, ct) { + h.ServeHTTP(w, r) + return + } + } + http.Error(w, fmt.Sprintf("Unsupported content type %q; expected one of %q", r.Header.Get("Content-Type"), contentTypes), http.StatusUnsupportedMediaType) + }) +} + +const ( + // HTTPMethodOverrideHeader is a commonly used + // http header to override a request method. + HTTPMethodOverrideHeader = "X-HTTP-Method-Override" + // HTTPMethodOverrideFormKey is a commonly used + // HTML form key to override a request method. + HTTPMethodOverrideFormKey = "_method" +) + +// HTTPMethodOverrideHandler wraps and returns a http.Handler which checks for +// the X-HTTP-Method-Override header or the _method form key, and overrides (if +// valid) request.Method with its value. +// +// This is especially useful for HTTP clients that don't support many http verbs. +// It isn't secure to override e.g a GET to a POST, so only POST requests are +// considered. Likewise, the override method can only be a "write" method: PUT, +// PATCH or DELETE. +// +// Form method takes precedence over header method. 
+func HTTPMethodOverrideHandler(h http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == "POST" { + om := r.FormValue(HTTPMethodOverrideFormKey) + if om == "" { + om = r.Header.Get(HTTPMethodOverrideHeader) + } + if om == "PUT" || om == "PATCH" || om == "DELETE" { + r.Method = om + } + } + h.ServeHTTP(w, r) + }) +} diff --git a/vendor/github.com/gorilla/handlers/proxy_headers.go b/vendor/github.com/gorilla/handlers/proxy_headers.go new file mode 100644 index 000000000..0be750fd7 --- /dev/null +++ b/vendor/github.com/gorilla/handlers/proxy_headers.go @@ -0,0 +1,120 @@ +package handlers + +import ( + "net/http" + "regexp" + "strings" +) + +var ( + // De-facto standard header keys. + xForwardedFor = http.CanonicalHeaderKey("X-Forwarded-For") + xForwardedHost = http.CanonicalHeaderKey("X-Forwarded-Host") + xForwardedProto = http.CanonicalHeaderKey("X-Forwarded-Proto") + xForwardedScheme = http.CanonicalHeaderKey("X-Forwarded-Scheme") + xRealIP = http.CanonicalHeaderKey("X-Real-IP") +) + +var ( + // RFC7239 defines a new "Forwarded: " header designed to replace the + // existing use of X-Forwarded-* headers. + // e.g. Forwarded: for=192.0.2.60;proto=https;by=203.0.113.43 + forwarded = http.CanonicalHeaderKey("Forwarded") + // Allows for a sub-match of the first value after 'for=' to the next + // comma, semi-colon or space. The match is case-insensitive. + forRegex = regexp.MustCompile(`(?i)(?:for=)([^(;|,| )]+)`) + // Allows for a sub-match for the first instance of scheme (http|https) + // prefixed by 'proto='. The match is case-insensitive. + protoRegex = regexp.MustCompile(`(?i)(?:proto=)(https|http)`) +) + +// ProxyHeaders inspects common reverse proxy headers and sets the corresponding +// fields in the HTTP request struct. These are X-Forwarded-For and X-Real-IP +// for the remote (client) IP address, X-Forwarded-Proto or X-Forwarded-Scheme +// for the scheme (http|https) and the RFC7239 Forwarded header, which may +// include both client IPs and schemes. +// +// NOTE: This middleware should only be used when behind a reverse +// proxy like nginx, HAProxy or Apache. Reverse proxies that don't (or are +// configured not to) strip these headers from client requests, or where these +// headers are accepted "as is" from a remote client (e.g. when Go is not behind +// a proxy), can manifest as a vulnerability if your application uses these +// headers for validating the 'trustworthiness' of a request. +func ProxyHeaders(h http.Handler) http.Handler { + fn := func(w http.ResponseWriter, r *http.Request) { + // Set the remote IP with the value passed from the proxy. + if fwd := getIP(r); fwd != "" { + r.RemoteAddr = fwd + } + + // Set the scheme (proto) with the value passed from the proxy. + if scheme := getScheme(r); scheme != "" { + r.URL.Scheme = scheme + } + // Set the host with the value passed by the proxy + if r.Header.Get(xForwardedHost) != "" { + r.Host = r.Header.Get(xForwardedHost) + } + // Call the next handler in the chain. + h.ServeHTTP(w, r) + } + + return http.HandlerFunc(fn) +} + +// getIP retrieves the IP from the X-Forwarded-For, X-Real-IP and RFC7239 +// Forwarded headers (in that order). +func getIP(r *http.Request) string { + var addr string + + if fwd := r.Header.Get(xForwardedFor); fwd != "" { + // Only grab the first (client) address. 
Note that '192.168.0.1, + // 10.1.1.1' is a valid key for X-Forwarded-For where addresses after + // the first may represent forwarding proxies earlier in the chain. + s := strings.Index(fwd, ", ") + if s == -1 { + s = len(fwd) + } + addr = fwd[:s] + } else if fwd := r.Header.Get(xRealIP); fwd != "" { + // X-Real-IP should only contain one IP address (the client making the + // request). + addr = fwd + } else if fwd := r.Header.Get(forwarded); fwd != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'for=' capture, which we ignore. In the case of multiple IP + // addresses (for=8.8.8.8, 8.8.4.4,172.16.1.20 is valid) we only + // extract the first, which should be the client IP. + if match := forRegex.FindStringSubmatch(fwd); len(match) > 1 { + // IPv6 addresses in Forwarded headers are quoted-strings. We strip + // these quotes. + addr = strings.Trim(match[1], `"`) + } + } + + return addr +} + +// getScheme retrieves the scheme from the X-Forwarded-Proto and RFC7239 +// Forwarded headers (in that order). +func getScheme(r *http.Request) string { + var scheme string + + // Retrieve the scheme from X-Forwarded-Proto. + if proto := r.Header.Get(xForwardedProto); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(xForwardedScheme); proto != "" { + scheme = strings.ToLower(proto) + } else if proto = r.Header.Get(forwarded); proto != "" { + // match should contain at least two elements if the protocol was + // specified in the Forwarded header. The first element will always be + // the 'proto=' capture, which we ignore. In the case of multiple proto + // parameters (invalid) we only extract the first. + if match := protoRegex.FindStringSubmatch(proto); len(match) > 1 { + scheme = strings.ToLower(match[1]) + } + } + + return scheme +} diff --git a/vendor/github.com/gorilla/handlers/recovery.go b/vendor/github.com/gorilla/handlers/recovery.go new file mode 100644 index 000000000..65b7de58a --- /dev/null +++ b/vendor/github.com/gorilla/handlers/recovery.go @@ -0,0 +1,86 @@ +package handlers + +import ( + "log" + "net/http" + "runtime/debug" +) + +type recoveryHandler struct { + handler http.Handler + logger *log.Logger + printStack bool +} + +// RecoveryOption provides a functional approach to define +// configuration for a handler; such as setting the logging +// whether or not to print strack traces on panic. +type RecoveryOption func(http.Handler) + +func parseRecoveryOptions(h http.Handler, opts ...RecoveryOption) http.Handler { + for _, option := range opts { + option(h) + } + + return h +} + +// RecoveryHandler is HTTP middleware that recovers from a panic, +// logs the panic, writes http.StatusInternalServerError, and +// continues to the next handler. +// +// Example: +// +// r := mux.NewRouter() +// r.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { +// panic("Unexpected error!") +// }) +// +// http.ListenAndServe(":1123", handlers.RecoveryHandler()(r)) +func RecoveryHandler(opts ...RecoveryOption) func(h http.Handler) http.Handler { + return func(h http.Handler) http.Handler { + r := &recoveryHandler{handler: h} + return parseRecoveryOptions(r, opts...) 
+ } +} + +// RecoveryLogger is a functional option to override +// the default logger +func RecoveryLogger(logger *log.Logger) RecoveryOption { + return func(h http.Handler) { + r := h.(*recoveryHandler) + r.logger = logger + } +} + +// PrintRecoveryStack is a functional option to enable +// or disable printing stack traces on panic. +func PrintRecoveryStack(print bool) RecoveryOption { + return func(h http.Handler) { + r := h.(*recoveryHandler) + r.printStack = print + } +} + +func (h recoveryHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + defer func() { + if err := recover(); err != nil { + w.WriteHeader(http.StatusInternalServerError) + h.log(err) + } + }() + + h.handler.ServeHTTP(w, req) +} + +func (h recoveryHandler) log(message interface{}) { + if h.logger != nil { + h.logger.Println(message) + } else { + log.Println(message) + } + + if h.printStack { + debug.PrintStack() + } +} diff --git a/vendor/github.com/gorilla/rpc/.gitignore b/vendor/github.com/gorilla/rpc/.gitignore new file mode 100644 index 000000000..b25c15b81 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/.gitignore @@ -0,0 +1 @@ +*~ diff --git a/vendor/github.com/gorilla/rpc/.travis.yml b/vendor/github.com/gorilla/rpc/.travis.yml new file mode 100644 index 000000000..6f440f1e4 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/.travis.yml @@ -0,0 +1,19 @@ +language: go +sudo: false + +matrix: + include: + - go: 1.3 + - go: 1.4 + - go: 1.5 + - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: + - go: tip + +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - go vet $(go list ./... | grep -v /vendor/) + - go test -v -race ./... diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/github.com/gorilla/rpc/LICENSE similarity index 83% rename from vendor/golang.org/x/crypto/LICENSE rename to vendor/github.com/gorilla/rpc/LICENSE index 6a66aea5e..0e5fb8728 100644 --- a/vendor/golang.org/x/crypto/LICENSE +++ b/vendor/github.com/gorilla/rpc/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/github.com/gorilla/rpc/README.md b/vendor/github.com/gorilla/rpc/README.md new file mode 100644 index 000000000..75c26eaa8 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/README.md @@ -0,0 +1,7 @@ +rpc +=== +[![Build Status](https://travis-ci.org/gorilla/rpc.png?branch=master)](https://travis-ci.org/gorilla/rpc) + +gorilla/rpc is a foundation for RPC over HTTP services, providing access to the exported methods of an object through HTTP requests. 
+ +Read the full documentation here: http://www.gorillatoolkit.org/pkg/rpc diff --git a/vendor/github.com/kr/fs/LICENSE b/vendor/github.com/gorilla/rpc/v2/LICENSE similarity index 83% rename from vendor/github.com/kr/fs/LICENSE rename to vendor/github.com/gorilla/rpc/v2/LICENSE index 744875676..0e5fb8728 100644 --- a/vendor/github.com/kr/fs/LICENSE +++ b/vendor/github.com/gorilla/rpc/v2/LICENSE @@ -1,16 +1,16 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. +Copyright (c) 2012 Rodrigo Moraes. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. diff --git a/vendor/github.com/gorilla/rpc/v2/README.md b/vendor/github.com/gorilla/rpc/v2/README.md new file mode 100644 index 000000000..8f9af9a8d --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/README.md @@ -0,0 +1,6 @@ +rpc +=== + +gorilla/rpc is a foundation for RPC over HTTP services, providing access to the exported methods of an object through HTTP requests. + +Read the full documentation here: http://www.gorillatoolkit.org/pkg/rpc diff --git a/vendor/github.com/gorilla/rpc/v2/compression_selector.go b/vendor/github.com/gorilla/rpc/v2/compression_selector.go new file mode 100644 index 000000000..bbf3fd1ef --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/compression_selector.go @@ -0,0 +1,90 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "compress/flate" + "compress/gzip" + "io" + "net/http" + "strings" + "unicode" +) + +// gzipWriter writes and closes the gzip writer. +type gzipWriter struct { + w *gzip.Writer +} + +func (gw *gzipWriter) Write(p []byte) (n int, err error) { + defer gw.w.Close() + return gw.w.Write(p) +} + +// gzipEncoder implements the gzip compressed http encoder. +type gzipEncoder struct { +} + +func (enc *gzipEncoder) Encode(w http.ResponseWriter) io.Writer { + w.Header().Set("Content-Encoding", "gzip") + return &gzipWriter{gzip.NewWriter(w)} +} + +// flateWriter writes and closes the flate writer. +type flateWriter struct { + w *flate.Writer +} + +func (fw *flateWriter) Write(p []byte) (n int, err error) { + defer fw.w.Close() + return fw.w.Write(p) +} + +// flateEncoder implements the flate compressed http encoder. 
+type flateEncoder struct { +} + +func (enc *flateEncoder) Encode(w http.ResponseWriter) io.Writer { + fw, err := flate.NewWriter(w, flate.DefaultCompression) + if err != nil { + return w + } + w.Header().Set("Content-Encoding", "deflate") + return &flateWriter{fw} +} + +// CompressionSelector generates the compressed http encoder. +type CompressionSelector struct { +} + +// acceptedEnc returns the first compression type in "Accept-Encoding" header +// field of the request. +func acceptedEnc(req *http.Request) string { + encHeader := req.Header.Get("Accept-Encoding") + if encHeader == "" { + return "" + } + encTypes := strings.FieldsFunc(encHeader, func(r rune) bool { + return unicode.IsSpace(r) || r == ',' + }) + for _, enc := range encTypes { + if enc == "gzip" || enc == "deflate" { + return enc + } + } + return "" +} + +// Select method selects the correct compression encoder based on http HEADER. +func (_ *CompressionSelector) Select(r *http.Request) Encoder { + switch acceptedEnc(r) { + case "gzip": + return &gzipEncoder{} + case "flate": + return &flateEncoder{} + } + return DefaultEncoder +} diff --git a/vendor/github.com/gorilla/rpc/v2/doc.go b/vendor/github.com/gorilla/rpc/v2/doc.go new file mode 100644 index 000000000..301d5dc06 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/doc.go @@ -0,0 +1,81 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc is a foundation for RPC over HTTP services, providing +access to the exported methods of an object through HTTP requests. + +This package derives from the standard net/rpc package but uses a single HTTP +request per call instead of persistent connections. Other differences +compared to net/rpc: + + - Multiple codecs can be registered in the same server. + - A codec is chosen based on the "Content-Type" header from the request. + - Service methods also receive http.Request as parameter. + - This package can be used on Google App Engine. + +Let's setup a server and register a codec and service: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + s.RegisterService(new(HelloService), "") + http.Handle("/rpc", s) + } + +This server handles requests to the "/rpc" path using a JSON codec. +A codec is tied to a content type. In the example above, the JSON codec is +registered to serve requests with "application/json" as the value for the +"Content-Type" header. If the header includes a charset definition, it is +ignored; only the media-type part is taken into account. + +A service can be registered using a name. If the name is empty, like in the +example above, it will be inferred from the service type. + +That's all about the server setup. Now let's define a simple service: + + type HelloArgs struct { + Who string + } + + type HelloReply struct { + Message string + } + + type HelloService struct {} + + func (h *HelloService) Say(r *http.Request, args *HelloArgs, reply *HelloReply) error { + reply.Message = "Hello, " + args.Who + "!" + return nil + } + +The example above defines a service with a method "HelloService.Say" and +the arguments and reply related to that method. 
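+
+A client can invoke "HelloService.Say" with a single JSON-encoded HTTP POST.
+The following is only a minimal sketch, assuming the server above is reachable
+at localhost:8080 and using net/http, bytes and the EncodeClientRequest and
+DecodeClientResponse helpers from the json sub-package:
+
+	args := &HelloArgs{Who: "World"}
+	body, err := json.EncodeClientRequest("HelloService.Say", args)
+	if err != nil {
+		// handle the encoding error
+	}
+	resp, err := http.Post("http://localhost:8080/rpc", "application/json", bytes.NewBuffer(body))
+	if err != nil {
+		// handle the transport error
+	}
+	defer resp.Body.Close()
+	var reply HelloReply
+	if err := json.DecodeClientResponse(resp.Body, &reply); err != nil {
+		// handle the RPC error returned by the service
+	}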
+ +The service must be exported (begin with an upper case letter) or local +(defined in the package registering the service). + +When a service is registered, the server inspects the service methods +and make available the ones that follow these rules: + + - The method name is exported. + - The method has three arguments: *http.Request, *args, *reply. + - All three arguments are pointers. + - The second and third arguments are exported or local. + - The method has return type error. + +All other methods are ignored. + +Gorilla has packages with common RPC codecs. Check out their documentation: + + JSON: http://gorilla-web.appspot.com/pkg/rpc/json +*/ +package rpc diff --git a/vendor/github.com/gorilla/rpc/v2/encoder_selector.go b/vendor/github.com/gorilla/rpc/v2/encoder_selector.go new file mode 100644 index 000000000..333361f3a --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/encoder_selector.go @@ -0,0 +1,43 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "io" + "net/http" +) + +// Encoder interface contains the encoder for http response. +// Eg. gzip, flate compressions. +type Encoder interface { + Encode(w http.ResponseWriter) io.Writer +} + +type encoder struct { +} + +func (_ *encoder) Encode(w http.ResponseWriter) io.Writer { + return w +} + +var DefaultEncoder = &encoder{} + +// EncoderSelector interface provides a way to select encoder using the http +// request. Typically people can use this to check HEADER of the request and +// figure out client capabilities. +// Eg. "Accept-Encoding" tells about supported compressions. +type EncoderSelector interface { + Select(r *http.Request) Encoder +} + +type encoderSelector struct { +} + +func (_ *encoderSelector) Select(_ *http.Request) Encoder { + return DefaultEncoder +} + +var DefaultEncoderSelector = &encoderSelector{} diff --git a/vendor/github.com/gorilla/rpc/v2/json/client.go b/vendor/github.com/gorilla/rpc/v2/json/client.go new file mode 100644 index 000000000..be05788e2 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/client.go @@ -0,0 +1,61 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012-2013 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/json" + "fmt" + "io" + "math/rand" +) + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// clientRequest represents a JSON-RPC request sent by a client. +type clientRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // Object to pass as request parameter to the method. + Params [1]interface{} `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id uint64 `json:"id"` +} + +// clientResponse represents a JSON-RPC response returned to a client. +type clientResponse struct { + Result *json.RawMessage `json:"result"` + Error interface{} `json:"error"` + Id uint64 `json:"id"` +} + +// EncodeClientRequest encodes parameters for a JSON-RPC client request. 
+func EncodeClientRequest(method string, args interface{}) ([]byte, error) { + c := &clientRequest{ + Method: method, + Params: [1]interface{}{args}, + Id: uint64(rand.Int63()), + } + return json.Marshal(c) +} + +// DecodeClientResponse decodes the response body of a client request into +// the interface reply. +func DecodeClientResponse(r io.Reader, reply interface{}) error { + var c clientResponse + if err := json.NewDecoder(r).Decode(&c); err != nil { + return err + } + if c.Error != nil { + return &Error{Data: c.Error} + } + if c.Result == nil { + return fmt.Errorf("Unexpected null result") + } + return json.Unmarshal(*c.Result, reply) +} diff --git a/vendor/github.com/gorilla/rpc/v2/json/doc.go b/vendor/github.com/gorilla/rpc/v2/json/doc.go new file mode 100644 index 000000000..3f92b9cb3 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/doc.go @@ -0,0 +1,58 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package gorilla/rpc/json provides a codec for JSON-RPC over HTTP services. + +To register the codec in a RPC server: + + import ( + "http" + "github.com/gorilla/rpc/v2" + "github.com/gorilla/rpc/v2/json" + ) + + func init() { + s := rpc.NewServer() + s.RegisterCodec(json.NewCodec(), "application/json") + // [...] + http.Handle("/rpc", s) + } + +A codec is tied to a content type. In the example above, the server will use +the JSON codec for requests with "application/json" as the value for the +"Content-Type" header. + +This package follows the JSON-RPC 1.0 specification: + + http://json-rpc.org/wiki/specification + +Request format is: + + method: + The name of the method to be invoked, as a string in dotted notation + as in "Service.Method". + params: + An array with a single object to pass as argument to the method. + id: + The request id, a uint. It is used to match the response with the + request that it is replying to. + +Response format is: + + result: + The Object that was returned by the invoked method, + or null in case there was an error invoking the method. + error: + An Error object if there was an error invoking the method, + or null if there was no error. + id: + The same id as the request it is responding to. + +Check the gorilla/rpc documentation for more details: + + http://gorilla-web.appspot.com/pkg/rpc +*/ +package json diff --git a/vendor/github.com/gorilla/rpc/v2/json/server.go b/vendor/github.com/gorilla/rpc/v2/json/server.go new file mode 100644 index 000000000..8fafbe3ad --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/json/server.go @@ -0,0 +1,155 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package json + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/gorilla/rpc/v2" +) + +var null = json.RawMessage([]byte("null")) + +// An Error is a wrapper for a JSON interface value. It can be used by either +// a service's handler func to write more complex JSON data to an error field +// of a server's response, or by a client to read it. 
+type Error struct { + Data interface{} +} + +func (e *Error) Error() string { + return fmt.Sprintf("%v", e.Data) +} + +// ---------------------------------------------------------------------------- +// Request and Response +// ---------------------------------------------------------------------------- + +// serverRequest represents a JSON-RPC request received by the server. +type serverRequest struct { + // A String containing the name of the method to be invoked. + Method string `json:"method"` + // An Array of objects to pass as arguments to the method. + Params *json.RawMessage `json:"params"` + // The request id. This can be of any type. It is used to match the + // response with the request that it is replying to. + Id *json.RawMessage `json:"id"` +} + +// serverResponse represents a JSON-RPC response returned by the server. +type serverResponse struct { + // The Object that was returned by the invoked method. This must be null + // in case there was an error invoking the method. + Result interface{} `json:"result"` + // An Error object if there was an error invoking the method. It must be + // null if there was no error. + Error interface{} `json:"error"` + // This must be the same id as the request it is responding to. + Id *json.RawMessage `json:"id"` +} + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// NewCodec returns a new JSON Codec. +func NewCodec() *Codec { + return &Codec{} +} + +// Codec creates a CodecRequest to process each request. +type Codec struct { +} + +// NewRequest returns a CodecRequest. +func (c *Codec) NewRequest(r *http.Request) rpc.CodecRequest { + return newCodecRequest(r) +} + +// ---------------------------------------------------------------------------- +// CodecRequest +// ---------------------------------------------------------------------------- + +// newCodecRequest returns a new CodecRequest. +func newCodecRequest(r *http.Request) rpc.CodecRequest { + // Decode the request body and check if RPC method is valid. + req := new(serverRequest) + err := json.NewDecoder(r.Body).Decode(req) + r.Body.Close() + return &CodecRequest{request: req, err: err} +} + +// CodecRequest decodes and encodes a single request. +type CodecRequest struct { + request *serverRequest + err error +} + +// Method returns the RPC method for the current request. +// +// The method uses a dotted notation as in "Service.Method". +func (c *CodecRequest) Method() (string, error) { + if c.err == nil { + return c.request.Method, nil + } + return "", c.err +} + +// ReadRequest fills the request object for the RPC method. +func (c *CodecRequest) ReadRequest(args interface{}) error { + if c.err == nil { + if c.request.Params != nil { + // JSON params is array value. RPC params is struct. + // Unmarshal into array containing the request struct. + params := [1]interface{}{args} + c.err = json.Unmarshal(*c.request.Params, ¶ms) + } else { + c.err = errors.New("rpc: method request ill-formed: missing params field") + } + } + return c.err +} + +// WriteResponse encodes the response and writes it to the ResponseWriter. +func (c *CodecRequest) WriteResponse(w http.ResponseWriter, reply interface{}) { + if c.request.Id != nil { + // Id is null for notifications and they don't have a response. 
+ res := &serverResponse{ + Result: reply, + Error: &null, + Id: c.request.Id, + } + c.writeServerResponse(w, 200, res) + } +} + +func (c *CodecRequest) WriteError(w http.ResponseWriter, _ int, err error) { + res := &serverResponse{ + Result: &null, + Id: c.request.Id, + } + if jsonErr, ok := err.(*Error); ok { + res.Error = jsonErr.Data + } else { + res.Error = err.Error() + } + c.writeServerResponse(w, 400, res) +} + +func (c *CodecRequest) writeServerResponse(w http.ResponseWriter, status int, res *serverResponse) { + b, err := json.Marshal(res) + if err == nil { + w.WriteHeader(status) + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Write(b) + } else { + // Not sure in which case will this happen. But seems harmless. + rpc.WriteError(w, 400, err.Error()) + } +} diff --git a/vendor/github.com/gorilla/rpc/v2/map.go b/vendor/github.com/gorilla/rpc/v2/map.go new file mode 100644 index 000000000..dda42161c --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/map.go @@ -0,0 +1,164 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +var ( + // Precompute the reflect.Type of error and http.Request + typeOfError = reflect.TypeOf((*error)(nil)).Elem() + typeOfRequest = reflect.TypeOf((*http.Request)(nil)).Elem() +) + +// ---------------------------------------------------------------------------- +// service +// ---------------------------------------------------------------------------- + +type service struct { + name string // name of service + rcvr reflect.Value // receiver of methods for the service + rcvrType reflect.Type // type of the receiver + methods map[string]*serviceMethod // registered methods +} + +type serviceMethod struct { + method reflect.Method // receiver method + argsType reflect.Type // type of the request argument + replyType reflect.Type // type of the response argument +} + +// ---------------------------------------------------------------------------- +// serviceMap +// ---------------------------------------------------------------------------- + +// serviceMap is a registry for services. +type serviceMap struct { + mutex sync.Mutex + services map[string]*service +} + +// register adds a new service using reflection to extract its methods. +func (m *serviceMap) register(rcvr interface{}, name string) error { + // Setup service. + s := &service{ + name: name, + rcvr: reflect.ValueOf(rcvr), + rcvrType: reflect.TypeOf(rcvr), + methods: make(map[string]*serviceMethod), + } + if name == "" { + s.name = reflect.Indirect(s.rcvr).Type().Name() + if !isExported(s.name) { + return fmt.Errorf("rpc: type %q is not exported", s.name) + } + } + if s.name == "" { + return fmt.Errorf("rpc: no service name for type %q", + s.rcvrType.String()) + } + // Setup methods. + for i := 0; i < s.rcvrType.NumMethod(); i++ { + method := s.rcvrType.Method(i) + mtype := method.Type + // Method must be exported. + if method.PkgPath != "" { + continue + } + // Method needs four ins: receiver, *http.Request, *args, *reply. + if mtype.NumIn() != 4 { + continue + } + // First argument must be a pointer and must be http.Request. 
+ reqType := mtype.In(1) + if reqType.Kind() != reflect.Ptr || reqType.Elem() != typeOfRequest { + continue + } + // Second argument must be a pointer and must be exported. + args := mtype.In(2) + if args.Kind() != reflect.Ptr || !isExportedOrBuiltin(args) { + continue + } + // Third argument must be a pointer and must be exported. + reply := mtype.In(3) + if reply.Kind() != reflect.Ptr || !isExportedOrBuiltin(reply) { + continue + } + // Method needs one out: error. + if mtype.NumOut() != 1 { + continue + } + if returnType := mtype.Out(0); returnType != typeOfError { + continue + } + s.methods[method.Name] = &serviceMethod{ + method: method, + argsType: args.Elem(), + replyType: reply.Elem(), + } + } + if len(s.methods) == 0 { + return fmt.Errorf("rpc: %q has no exported methods of suitable type", + s.name) + } + // Add to the map. + m.mutex.Lock() + defer m.mutex.Unlock() + if m.services == nil { + m.services = make(map[string]*service) + } else if _, ok := m.services[s.name]; ok { + return fmt.Errorf("rpc: service already defined: %q", s.name) + } + m.services[s.name] = s + return nil +} + +// get returns a registered service given a method name. +// +// The method name uses a dotted notation as in "Service.Method". +func (m *serviceMap) get(method string) (*service, *serviceMethod, error) { + parts := strings.Split(method, ".") + if len(parts) != 2 { + err := fmt.Errorf("rpc: service/method request ill-formed: %q", method) + return nil, nil, err + } + m.mutex.Lock() + service := m.services[parts[0]] + m.mutex.Unlock() + if service == nil { + err := fmt.Errorf("rpc: can't find service %q", method) + return nil, nil, err + } + serviceMethod := service.methods[parts[1]] + if serviceMethod == nil { + err := fmt.Errorf("rpc: can't find method %q", method) + return nil, nil, err + } + return service, serviceMethod, nil +} + +// isExported returns true of a string is an exported (upper case) name. +func isExported(name string) bool { + rune, _ := utf8.DecodeRuneInString(name) + return unicode.IsUpper(rune) +} + +// isExportedOrBuiltin returns true if a type is exported or a builtin. +func isExportedOrBuiltin(t reflect.Type) bool { + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + // PkgPath will be non-empty even for an exported type, + // so we need to check the type name as well. + return isExported(t.Name()) || t.PkgPath() == "" +} diff --git a/vendor/github.com/gorilla/rpc/v2/server.go b/vendor/github.com/gorilla/rpc/v2/server.go new file mode 100644 index 000000000..bd0a42db4 --- /dev/null +++ b/vendor/github.com/gorilla/rpc/v2/server.go @@ -0,0 +1,164 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rpc + +import ( + "fmt" + "net/http" + "reflect" + "strings" +) + +// ---------------------------------------------------------------------------- +// Codec +// ---------------------------------------------------------------------------- + +// Codec creates a CodecRequest to process each request. +type Codec interface { + NewRequest(*http.Request) CodecRequest +} + +// CodecRequest decodes a request and encodes a response using a specific +// serialization scheme. +type CodecRequest interface { + // Reads the request and returns the RPC method name. + Method() (string, error) + // Reads the request filling the RPC method args. 
+ ReadRequest(interface{}) error + // Writes the response using the RPC method reply. + WriteResponse(http.ResponseWriter, interface{}) + // Writes an error produced by the server. + WriteError(w http.ResponseWriter, status int, err error) +} + +// ---------------------------------------------------------------------------- +// Server +// ---------------------------------------------------------------------------- + +// NewServer returns a new RPC server. +func NewServer() *Server { + return &Server{ + codecs: make(map[string]Codec), + services: new(serviceMap), + } +} + +// Server serves registered RPC services using registered codecs. +type Server struct { + codecs map[string]Codec + services *serviceMap +} + +// RegisterCodec adds a new codec to the server. +// +// Codecs are defined to process a given serialization scheme, e.g., JSON or +// XML. A codec is chosen based on the "Content-Type" header from the request, +// excluding the charset definition. +func (s *Server) RegisterCodec(codec Codec, contentType string) { + s.codecs[strings.ToLower(contentType)] = codec +} + +// RegisterService adds a new service to the server. +// +// The name parameter is optional: if empty it will be inferred from +// the receiver type name. +// +// Methods from the receiver will be extracted if these rules are satisfied: +// +// - The receiver is exported (begins with an upper case letter) or local +// (defined in the package registering the service). +// - The method name is exported. +// - The method has three arguments: *http.Request, *args, *reply. +// - All three arguments are pointers. +// - The second and third arguments are exported or local. +// - The method has return type error. +// +// All other methods are ignored. +func (s *Server) RegisterService(receiver interface{}, name string) error { + return s.services.register(receiver, name) +} + +// HasMethod returns true if the given method is registered. +// +// The method uses a dotted notation as in "Service.Method". +func (s *Server) HasMethod(method string) bool { + if _, _, err := s.services.get(method); err == nil { + return true + } + return false +} + +// ServeHTTP +func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + if r.Method != "POST" { + WriteError(w, 405, "rpc: POST method required, received "+r.Method) + return + } + contentType := r.Header.Get("Content-Type") + idx := strings.Index(contentType, ";") + if idx != -1 { + contentType = contentType[:idx] + } + var codec Codec + if contentType == "" && len(s.codecs) == 1 { + // If Content-Type is not set and only one codec has been registered, + // then default to that codec. + for _, c := range s.codecs { + codec = c + } + } else if codec = s.codecs[strings.ToLower(contentType)]; codec == nil { + WriteError(w, 415, "rpc: unrecognized Content-Type: "+contentType) + return + } + // Create a new codec request. + codecReq := codec.NewRequest(r) + // Get service method to be called. + method, errMethod := codecReq.Method() + if errMethod != nil { + codecReq.WriteError(w, 400, errMethod) + return + } + serviceSpec, methodSpec, errGet := s.services.get(method) + if errGet != nil { + codecReq.WriteError(w, 400, errGet) + return + } + // Decode the args. + args := reflect.New(methodSpec.argsType) + if errRead := codecReq.ReadRequest(args.Interface()); errRead != nil { + codecReq.WriteError(w, 400, errRead) + return + } + // Call the service method. 
+ reply := reflect.New(methodSpec.replyType) + errValue := methodSpec.method.Func.Call([]reflect.Value{ + serviceSpec.rcvr, + reflect.ValueOf(r), + args, + reply, + }) + // Cast the result to error if needed. + var errResult error + errInter := errValue[0].Interface() + if errInter != nil { + errResult = errInter.(error) + } + // Prevents Internet Explorer from MIME-sniffing a response away + // from the declared content-type + w.Header().Set("x-content-type-options", "nosniff") + // Encode the response. + if errResult == nil { + codecReq.WriteResponse(w, reply.Interface()) + } else { + codecReq.WriteError(w, 400, errResult) + } +} + +func WriteError(w http.ResponseWriter, status int, msg string) { + w.WriteHeader(status) + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + fmt.Fprint(w, msg) +} diff --git a/vendor/github.com/kr/fs/Readme b/vendor/github.com/kr/fs/Readme deleted file mode 100644 index c95e13fc8..000000000 --- a/vendor/github.com/kr/fs/Readme +++ /dev/null @@ -1,3 +0,0 @@ -Filesystem Package - -http://godoc.org/github.com/kr/fs diff --git a/vendor/github.com/kr/fs/filesystem.go b/vendor/github.com/kr/fs/filesystem.go deleted file mode 100644 index f1c4805fb..000000000 --- a/vendor/github.com/kr/fs/filesystem.go +++ /dev/null @@ -1,36 +0,0 @@ -package fs - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// FileSystem defines the methods of an abstract filesystem. -type FileSystem interface { - - // ReadDir reads the directory named by dirname and returns a - // list of directory entries. - ReadDir(dirname string) ([]os.FileInfo, error) - - // Lstat returns a FileInfo describing the named file. If the file is a - // symbolic link, the returned FileInfo describes the symbolic link. Lstat - // makes no attempt to follow the link. - Lstat(name string) (os.FileInfo, error) - - // Join joins any number of path elements into a single path, adding a - // separator if necessary. The result is Cleaned; in particular, all - // empty strings are ignored. - // - // The separator is FileSystem specific. - Join(elem ...string) string -} - -// fs represents a FileSystem provided by the os package. -type fs struct{} - -func (f *fs) ReadDir(dirname string) ([]os.FileInfo, error) { return ioutil.ReadDir(dirname) } - -func (f *fs) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } - -func (f *fs) Join(elem ...string) string { return filepath.Join(elem...) } diff --git a/vendor/github.com/kr/fs/walk.go b/vendor/github.com/kr/fs/walk.go deleted file mode 100644 index 6ffa1e0b2..000000000 --- a/vendor/github.com/kr/fs/walk.go +++ /dev/null @@ -1,95 +0,0 @@ -// Package fs provides filesystem-related functions. -package fs - -import ( - "os" -) - -// Walker provides a convenient interface for iterating over the -// descendants of a filesystem path. -// Successive calls to the Step method will step through each -// file or directory in the tree, including the root. The files -// are walked in lexical order, which makes the output deterministic -// but means that for very large directories Walker can be inefficient. -// Walker does not follow symbolic links. -type Walker struct { - fs FileSystem - cur item - stack []item - descend bool -} - -type item struct { - path string - info os.FileInfo - err error -} - -// Walk returns a new Walker rooted at root. -func Walk(root string) *Walker { - return WalkFS(root, new(fs)) -} - -// WalkFS returns a new Walker rooted at root on the FileSystem fs. 
-func WalkFS(root string, fs FileSystem) *Walker { - info, err := fs.Lstat(root) - return &Walker{ - fs: fs, - stack: []item{{root, info, err}}, - } -} - -// Step advances the Walker to the next file or directory, -// which will then be available through the Path, Stat, -// and Err methods. -// It returns false when the walk stops at the end of the tree. -func (w *Walker) Step() bool { - if w.descend && w.cur.err == nil && w.cur.info.IsDir() { - list, err := w.fs.ReadDir(w.cur.path) - if err != nil { - w.cur.err = err - w.stack = append(w.stack, w.cur) - } else { - for i := len(list) - 1; i >= 0; i-- { - path := w.fs.Join(w.cur.path, list[i].Name()) - w.stack = append(w.stack, item{path, list[i], nil}) - } - } - } - - if len(w.stack) == 0 { - return false - } - i := len(w.stack) - 1 - w.cur = w.stack[i] - w.stack = w.stack[:i] - w.descend = true - return true -} - -// Path returns the path to the most recent file or directory -// visited by a call to Step. It contains the argument to Walk -// as a prefix; that is, if Walk is called with "dir", which is -// a directory containing the file "a", Path will return "dir/a". -func (w *Walker) Path() string { - return w.cur.path -} - -// Stat returns info for the most recent file or directory -// visited by a call to Step. -func (w *Walker) Stat() os.FileInfo { - return w.cur.info -} - -// Err returns the error, if any, for the most recent attempt -// by Step to visit a file or directory. If a directory has -// an error, w will not descend into that directory. -func (w *Walker) Err() error { - return w.cur.err -} - -// SkipDir causes the currently visited directory to be skipped. -// If w is not on a directory, SkipDir has no effect. -func (w *Walker) SkipDir() { - w.descend = false -} diff --git a/vendor/github.com/opencontainers/runc/Dockerfile b/vendor/github.com/opencontainers/runc/Dockerfile index 567e55473..2350cf1e8 100644 --- a/vendor/github.com/opencontainers/runc/Dockerfile +++ b/vendor/github.com/opencontainers/runc/Dockerfile @@ -8,6 +8,7 @@ RUN apt-get update && apt-get install -y \ curl \ gawk \ iptables \ + jq \ pkg-config \ libaio-dev \ libcap-dev \ diff --git a/vendor/github.com/opencontainers/runc/Makefile b/vendor/github.com/opencontainers/runc/Makefile index 9b72ed604..779be9255 100644 --- a/vendor/github.com/opencontainers/runc/Makefile +++ b/vendor/github.com/opencontainers/runc/Makefile @@ -33,7 +33,7 @@ static: $(RUNC_LINK) CGO_ENABLED=1 go build -i -tags "$(BUILDTAGS) cgo static_build" -ldflags "-w -extldflags -static -X main.gitCommit=${COMMIT} -X main.version=${VERSION}" -o runc . release: $(RUNC_LINK) - @flag_list=(seccomp selinux apparmor static); \ + @flag_list=(seccomp selinux apparmor static ambient); \ unset expression; \ for flag in "$${flag_list[@]}"; do \ expression+="' '{'',$${flag}}"; \ diff --git a/vendor/github.com/opencontainers/runc/README.md b/vendor/github.com/opencontainers/runc/README.md index 4bca381aa..6c6d1d45e 100644 --- a/vendor/github.com/opencontainers/runc/README.md +++ b/vendor/github.com/opencontainers/runc/README.md @@ -17,6 +17,11 @@ You can find official releases of `runc` on the [release](https://github.com/ope `runc` currently supports the Linux platform with various architecture support. It must be built with Go version 1.6 or higher in order for some features to function properly. +In order to enable seccomp support you will need to install `libseccomp` on your platform. +> e.g. 
`libseccomp-devel` for CentOS, or `libseccomp-dev` for Ubuntu + +Otherwise, if you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make. + ```bash # create a 'github.com/opencontainers' in your GOPATH/src cd github.com/opencontainers @@ -29,9 +34,6 @@ sudo make install `runc` will be installed to `/usr/local/sbin/runc` on your system. -In order to enable seccomp support you will need to install libseccomp on your platform. -If you do not want to build `runc` with seccomp support you can add `BUILDTAGS=""` when running make. - #### Build Tags `runc` supports optional build tags for compiling support of various features. @@ -46,6 +48,7 @@ make BUILDTAGS='seccomp apparmor' | seccomp | Syscall filtering | libseccomp | | selinux | selinux process and mount labeling | | | apparmor | apparmor profile support | libapparmor | +| ambient | ambient capability support | kernel 4.3 | ### Running the test suite @@ -107,7 +110,7 @@ runc run mycontainerid If you used the unmodified `runc spec` template this should give you a `sh` session inside the container. The second way to start a container is using the specs lifecycle operations. -This gives you move power of how the container is created and managed while it is running. +This gives you more power over how the container is created and managed while it is running. This will also launch the container in the background so you will have to edit the `config.json` to remove the `terminal` setting for the simple examples here. Your process field in the `config.json` should look like this below with `"terminal": false` and `"args": ["sleep", "5"]`. diff --git a/vendor/github.com/opencontainers/runc/libcontainer/README.md b/vendor/github.com/opencontainers/runc/libcontainer/README.md index f625028aa..9435cf7db 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/README.md +++ b/vendor/github.com/opencontainers/runc/libcontainer/README.md @@ -223,6 +223,12 @@ container.Signal(signal) // update container resource constraints. container.Set(config) + +// get current status of the container. +status, err := container.Status() + +// get current container's state information. +state, err := container.State() ``` diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index 588ceca18..567ccdbf2 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -3,8 +3,8 @@ go_import_path: github.com/pkg/errors go: - 1.4.3 - 1.5.4 - - 1.6.2 - - 1.7.1 + - 1.6.3 + - 1.7.3 - tip script: diff --git a/vendor/github.com/pkg/sftp/.gitignore b/vendor/github.com/pkg/sftp/.gitignore deleted file mode 100644 index 9fc1e3d24..000000000 --- a/vendor/github.com/pkg/sftp/.gitignore +++ /dev/null @@ -1,8 +0,0 @@ -.*.swo -.*.swp - -server_standalone/server_standalone - -examples/sftp-server/id_rsa -examples/sftp-server/id_rsa.pub -examples/sftp-server/sftp-server diff --git a/vendor/github.com/pkg/sftp/.travis.yml b/vendor/github.com/pkg/sftp/.travis.yml deleted file mode 100644 index 3a3ba387f..000000000 --- a/vendor/github.com/pkg/sftp/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -go_import_path: github.com/pkg/sftp -go: - - 1.4.3 - - 1.5.2 - - 1.6.2 - - tip - -sudo: false - -addons: - ssh_known_hosts: - - bitbucket.org - -install: - - go get -t -v ./... 
- - ssh-keygen -t rsa -q -P "" -f /home/travis/.ssh/id_rsa - -script: - - go test -integration -v ./... - - go test -testserver -v ./... - - go test -integration -testserver -v ./... - - go test -race -integration -v ./... - - go test -race -testserver -v ./... - - go test -race -integration -testserver -v ./... diff --git a/vendor/github.com/pkg/sftp/CONTRIBUTORS b/vendor/github.com/pkg/sftp/CONTRIBUTORS deleted file mode 100644 index 7eff82364..000000000 --- a/vendor/github.com/pkg/sftp/CONTRIBUTORS +++ /dev/null @@ -1,2 +0,0 @@ -Dave Cheney -Saulius Gurklys diff --git a/vendor/github.com/pkg/sftp/LICENSE b/vendor/github.com/pkg/sftp/LICENSE deleted file mode 100644 index b7b53921e..000000000 --- a/vendor/github.com/pkg/sftp/LICENSE +++ /dev/null @@ -1,9 +0,0 @@ -Copyright (c) 2013, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/sftp/README.md b/vendor/github.com/pkg/sftp/README.md deleted file mode 100644 index e406b1e56..000000000 --- a/vendor/github.com/pkg/sftp/README.md +++ /dev/null @@ -1,27 +0,0 @@ -sftp ----- - -The `sftp` package provides support for file system operations on remote ssh servers using the SFTP subsystem. - -[![UNIX Build Status](https://travis-ci.org/pkg/sftp.svg?branch=master)](https://travis-ci.org/pkg/sftp) [![GoDoc](http://godoc.org/github.com/pkg/sftp?status.svg)](http://godoc.org/github.com/pkg/sftp) - -usage and examples ------------------- - -See [godoc.org/github.com/pkg/sftp](http://godoc.org/github.com/pkg/sftp) for examples and usage. - -The basic operation of the package mirrors the facilities of the [os](http://golang.org/pkg/os) package. - -The Walker interface for directory traversal is heavily inspired by Keith Rarick's [fs](http://godoc.org/github.com/kr/fs) package. - -roadmap -------- - - * There is way too much duplication in the Client methods. If there was an unmarshal(interface{}) method this would reduce a heap of the duplication. - -contributing ------------- - -We welcome pull requests, bug fixes and issue reports. - -Before proposing a large change, first please discuss your change by raising an issue. 
diff --git a/vendor/github.com/pkg/sftp/attrs.go b/vendor/github.com/pkg/sftp/attrs.go deleted file mode 100644 index 3e4c2912d..000000000 --- a/vendor/github.com/pkg/sftp/attrs.go +++ /dev/null @@ -1,237 +0,0 @@ -package sftp - -// ssh_FXP_ATTRS support -// see http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02#section-5 - -import ( - "os" - "syscall" - "time" -) - -const ( - ssh_FILEXFER_ATTR_SIZE = 0x00000001 - ssh_FILEXFER_ATTR_UIDGID = 0x00000002 - ssh_FILEXFER_ATTR_PERMISSIONS = 0x00000004 - ssh_FILEXFER_ATTR_ACMODTIME = 0x00000008 - ssh_FILEXFER_ATTR_EXTENDED = 0x80000000 -) - -// fileInfo is an artificial type designed to satisfy os.FileInfo. -type fileInfo struct { - name string - size int64 - mode os.FileMode - mtime time.Time - sys interface{} -} - -// Name returns the base name of the file. -func (fi *fileInfo) Name() string { return fi.name } - -// Size returns the length in bytes for regular files; system-dependent for others. -func (fi *fileInfo) Size() int64 { return fi.size } - -// Mode returns file mode bits. -func (fi *fileInfo) Mode() os.FileMode { return fi.mode } - -// ModTime returns the last modification time of the file. -func (fi *fileInfo) ModTime() time.Time { return fi.mtime } - -// IsDir returns true if the file is a directory. -func (fi *fileInfo) IsDir() bool { return fi.Mode().IsDir() } - -func (fi *fileInfo) Sys() interface{} { return fi.sys } - -// FileStat holds the original unmarshalled values from a call to READDIR or *STAT. -// It is exported for the purposes of accessing the raw values via os.FileInfo.Sys() -type FileStat struct { - Size uint64 - Mode uint32 - Mtime uint32 - Atime uint32 - UID uint32 - GID uint32 - Extended []StatExtended -} - -// StatExtended contains additional, extended information for a FileStat. 
-type StatExtended struct { - ExtType string - ExtData string -} - -func fileInfoFromStat(st *FileStat, name string) os.FileInfo { - fs := &fileInfo{ - name: name, - size: int64(st.Size), - mode: toFileMode(st.Mode), - mtime: time.Unix(int64(st.Mtime), 0), - sys: st, - } - return fs -} - -func fileStatFromInfo(fi os.FileInfo) (uint32, FileStat) { - mtime := fi.ModTime().Unix() - atime := mtime - var flags uint32 = ssh_FILEXFER_ATTR_SIZE | - ssh_FILEXFER_ATTR_PERMISSIONS | - ssh_FILEXFER_ATTR_ACMODTIME - - fileStat := FileStat{ - Size: uint64(fi.Size()), - Mode: fromFileMode(fi.Mode()), - Mtime: uint32(mtime), - Atime: uint32(atime), - } - - // os specific file stat decoding - fileStatFromInfoOs(fi, &flags, &fileStat) - - return flags, fileStat -} - -func unmarshalAttrs(b []byte) (*FileStat, []byte) { - flags, b := unmarshalUint32(b) - var fs FileStat - if flags&ssh_FILEXFER_ATTR_SIZE == ssh_FILEXFER_ATTR_SIZE { - fs.Size, b = unmarshalUint64(b) - } - if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { - fs.UID, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_UIDGID == ssh_FILEXFER_ATTR_UIDGID { - fs.GID, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_PERMISSIONS == ssh_FILEXFER_ATTR_PERMISSIONS { - fs.Mode, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_ACMODTIME == ssh_FILEXFER_ATTR_ACMODTIME { - fs.Atime, b = unmarshalUint32(b) - fs.Mtime, b = unmarshalUint32(b) - } - if flags&ssh_FILEXFER_ATTR_EXTENDED == ssh_FILEXFER_ATTR_EXTENDED { - var count uint32 - count, b = unmarshalUint32(b) - ext := make([]StatExtended, count, count) - for i := uint32(0); i < count; i++ { - var typ string - var data string - typ, b = unmarshalString(b) - data, b = unmarshalString(b) - ext[i] = StatExtended{typ, data} - } - fs.Extended = ext - } - return &fs, b -} - -func marshalFileInfo(b []byte, fi os.FileInfo) []byte { - // attributes variable struct, and also variable per protocol version - // spec version 3 attributes: - // uint32 flags - // uint64 size present only if flag SSH_FILEXFER_ATTR_SIZE - // uint32 uid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 gid present only if flag SSH_FILEXFER_ATTR_UIDGID - // uint32 permissions present only if flag SSH_FILEXFER_ATTR_PERMISSIONS - // uint32 atime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 mtime present only if flag SSH_FILEXFER_ACMODTIME - // uint32 extended_count present only if flag SSH_FILEXFER_ATTR_EXTENDED - // string extended_type - // string extended_data - // ... 
more extended data (extended_type - extended_data pairs), - // so that number of pairs equals extended_count - - flags, fileStat := fileStatFromInfo(fi) - - b = marshalUint32(b, flags) - if flags&ssh_FILEXFER_ATTR_SIZE != 0 { - b = marshalUint64(b, fileStat.Size) - } - if flags&ssh_FILEXFER_ATTR_UIDGID != 0 { - b = marshalUint32(b, fileStat.UID) - b = marshalUint32(b, fileStat.GID) - } - if flags&ssh_FILEXFER_ATTR_PERMISSIONS != 0 { - b = marshalUint32(b, fileStat.Mode) - } - if flags&ssh_FILEXFER_ATTR_ACMODTIME != 0 { - b = marshalUint32(b, fileStat.Atime) - b = marshalUint32(b, fileStat.Mtime) - } - - return b -} - -// toFileMode converts sftp filemode bits to the os.FileMode specification -func toFileMode(mode uint32) os.FileMode { - var fm = os.FileMode(mode & 0777) - switch mode & syscall.S_IFMT { - case syscall.S_IFBLK: - fm |= os.ModeDevice - case syscall.S_IFCHR: - fm |= os.ModeDevice | os.ModeCharDevice - case syscall.S_IFDIR: - fm |= os.ModeDir - case syscall.S_IFIFO: - fm |= os.ModeNamedPipe - case syscall.S_IFLNK: - fm |= os.ModeSymlink - case syscall.S_IFREG: - // nothing to do - case syscall.S_IFSOCK: - fm |= os.ModeSocket - } - if mode&syscall.S_ISGID != 0 { - fm |= os.ModeSetgid - } - if mode&syscall.S_ISUID != 0 { - fm |= os.ModeSetuid - } - if mode&syscall.S_ISVTX != 0 { - fm |= os.ModeSticky - } - return fm -} - -// fromFileMode converts from the os.FileMode specification to sftp filemode bits -func fromFileMode(mode os.FileMode) uint32 { - ret := uint32(0) - - if mode&os.ModeDevice != 0 { - if mode&os.ModeCharDevice != 0 { - ret |= syscall.S_IFCHR - } else { - ret |= syscall.S_IFBLK - } - } - if mode&os.ModeDir != 0 { - ret |= syscall.S_IFDIR - } - if mode&os.ModeSymlink != 0 { - ret |= syscall.S_IFLNK - } - if mode&os.ModeNamedPipe != 0 { - ret |= syscall.S_IFIFO - } - if mode&os.ModeSetgid != 0 { - ret |= syscall.S_ISGID - } - if mode&os.ModeSetuid != 0 { - ret |= syscall.S_ISUID - } - if mode&os.ModeSticky != 0 { - ret |= syscall.S_ISVTX - } - if mode&os.ModeSocket != 0 { - ret |= syscall.S_IFSOCK - } - - if mode&os.ModeType == 0 { - ret |= syscall.S_IFREG - } - ret |= uint32(mode & os.ModePerm) - - return ret -} diff --git a/vendor/github.com/pkg/sftp/attrs_stubs.go b/vendor/github.com/pkg/sftp/attrs_stubs.go deleted file mode 100644 index 81cf3eac2..000000000 --- a/vendor/github.com/pkg/sftp/attrs_stubs.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !cgo,!plan9 windows android - -package sftp - -import ( - "os" -) - -func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { - // todo -} diff --git a/vendor/github.com/pkg/sftp/attrs_unix.go b/vendor/github.com/pkg/sftp/attrs_unix.go deleted file mode 100644 index ab6ecdea9..000000000 --- a/vendor/github.com/pkg/sftp/attrs_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris -// +build cgo - -package sftp - -import ( - "os" - "syscall" -) - -func fileStatFromInfoOs(fi os.FileInfo, flags *uint32, fileStat *FileStat) { - if statt, ok := fi.Sys().(*syscall.Stat_t); ok { - *flags |= ssh_FILEXFER_ATTR_UIDGID - fileStat.UID = statt.Uid - fileStat.GID = statt.Gid - } -} diff --git a/vendor/github.com/pkg/sftp/client.go b/vendor/github.com/pkg/sftp/client.go deleted file mode 100644 index e95bbab47..000000000 --- a/vendor/github.com/pkg/sftp/client.go +++ /dev/null @@ -1,1128 +0,0 @@ -package sftp - -import ( - "bytes" - "encoding/binary" - "io" - "os" - "path" - "sync/atomic" - "time" - - 
"github.com/kr/fs" - "github.com/pkg/errors" - "golang.org/x/crypto/ssh" -) - -// MaxPacket sets the maximum size of the payload. -func MaxPacket(size int) func(*Client) error { - return func(c *Client) error { - if size < 1<<15 { - return errors.Errorf("size must be greater or equal to 32k") - } - c.maxPacket = size - return nil - } -} - -// NewClient creates a new SFTP client on conn, using zero or more option -// functions. -func NewClient(conn *ssh.Client, opts ...func(*Client) error) (*Client, error) { - s, err := conn.NewSession() - if err != nil { - return nil, err - } - if err := s.RequestSubsystem("sftp"); err != nil { - return nil, err - } - pw, err := s.StdinPipe() - if err != nil { - return nil, err - } - pr, err := s.StdoutPipe() - if err != nil { - return nil, err - } - - return NewClientPipe(pr, pw, opts...) -} - -// NewClientPipe creates a new SFTP client given a Reader and a WriteCloser. -// This can be used for connecting to an SFTP server over TCP/TLS or by using -// the system's ssh client program (e.g. via exec.Command). -func NewClientPipe(rd io.Reader, wr io.WriteCloser, opts ...func(*Client) error) (*Client, error) { - sftp := &Client{ - clientConn: clientConn{ - conn: conn{ - Reader: rd, - WriteCloser: wr, - }, - inflight: make(map[uint32]chan<- result), - }, - maxPacket: 1 << 15, - } - if err := sftp.applyOptions(opts...); err != nil { - wr.Close() - return nil, err - } - if err := sftp.sendInit(); err != nil { - wr.Close() - return nil, err - } - if err := sftp.recvVersion(); err != nil { - wr.Close() - return nil, err - } - sftp.clientConn.wg.Add(1) - go sftp.loop() - return sftp, nil -} - -// Client represents an SFTP session on a *ssh.ClientConn SSH connection. -// Multiple Clients can be active on a single SSH connection, and a Client -// may be called concurrently from multiple Goroutines. -// -// Client implements the github.com/kr/fs.FileSystem interface. -type Client struct { - clientConn - - maxPacket int // max packet size read or written. - nextid uint32 -} - -// Create creates the named file mode 0666 (before umask), truncating it if -// it already exists. If successful, methods on the returned File can be -// used for I/O; the associated file descriptor has mode O_RDWR. -func (c *Client) Create(path string) (*File, error) { - return c.open(path, flags(os.O_RDWR|os.O_CREATE|os.O_TRUNC)) -} - -const sftpProtocolVersion = 3 // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 - -func (c *Client) sendInit() error { - return c.clientConn.conn.sendPacket(sshFxInitPacket{ - Version: sftpProtocolVersion, // http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 - }) -} - -// returns the next value of c.nextid -func (c *Client) nextID() uint32 { - return atomic.AddUint32(&c.nextid, 1) -} - -func (c *Client) recvVersion() error { - typ, data, err := c.recvPacket() - if err != nil { - return err - } - if typ != ssh_FXP_VERSION { - return &unexpectedPacketErr{ssh_FXP_VERSION, typ} - } - - version, _ := unmarshalUint32(data) - if version != sftpProtocolVersion { - return &unexpectedVersionErr{sftpProtocolVersion, version} - } - - return nil -} - -// Walk returns a new Walker rooted at root. -func (c *Client) Walk(root string) *fs.Walker { - return fs.WalkFS(root, c) -} - -// ReadDir reads the directory named by dirname and returns a list of -// directory entries. 
-func (c *Client) ReadDir(p string) ([]os.FileInfo, error) { - handle, err := c.opendir(p) - if err != nil { - return nil, err - } - defer c.close(handle) // this has to defer earlier than the lock below - var attrs []os.FileInfo - var done = false - for !done { - id := c.nextID() - typ, data, err1 := c.sendPacket(sshFxpReaddirPacket{ - ID: id, - Handle: handle, - }) - if err1 != nil { - err = err1 - done = true - break - } - switch typ { - case ssh_FXP_NAME: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - for i := uint32(0); i < count; i++ { - var filename string - filename, data = unmarshalString(data) - _, data = unmarshalString(data) // discard longname - var attr *FileStat - attr, data = unmarshalAttrs(data) - if filename == "." || filename == ".." { - continue - } - attrs = append(attrs, fileInfoFromStat(attr, path.Base(filename))) - } - case ssh_FXP_STATUS: - // TODO(dfc) scope warning! - err = normaliseError(unmarshalStatus(id, data)) - done = true - default: - return nil, unimplementedPacketErr(typ) - } - } - if err == io.EOF { - err = nil - } - return attrs, err -} - -func (c *Client) opendir(path string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpOpendirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return "", err - } - switch typ { - case ssh_FXP_HANDLE: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - handle, _ := unmarshalString(data) - return handle, nil - case ssh_FXP_STATUS: - return "", unmarshalStatus(id, data) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Stat returns a FileInfo structure describing the file specified by path 'p'. -// If 'p' is a symbolic link, the returned FileInfo structure describes the referent file. -func (c *Client) Stat(p string) (os.FileInfo, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpStatPacket{ - ID: id, - Path: p, - }) - if err != nil { - return nil, err - } - switch typ { - case ssh_FXP_ATTRS: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return fileInfoFromStat(attr, path.Base(p)), nil - case ssh_FXP_STATUS: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// Lstat returns a FileInfo structure describing the file specified by path 'p'. -// If 'p' is a symbolic link, the returned FileInfo structure describes the symbolic link. -func (c *Client) Lstat(p string) (os.FileInfo, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpLstatPacket{ - ID: id, - Path: p, - }) - if err != nil { - return nil, err - } - switch typ { - case ssh_FXP_ATTRS: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return fileInfoFromStat(attr, path.Base(p)), nil - case ssh_FXP_STATUS: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// ReadLink reads the target of a symbolic link. 
-func (c *Client) ReadLink(p string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpReadlinkPacket{ - ID: id, - Path: p, - }) - if err != nil { - return "", err - } - switch typ { - case ssh_FXP_NAME: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - if count != 1 { - return "", unexpectedCount(1, count) - } - filename, _ := unmarshalString(data) // ignore dummy attributes - return filename, nil - case ssh_FXP_STATUS: - return "", unmarshalStatus(id, data) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Symlink creates a symbolic link at 'newname', pointing at target 'oldname' -func (c *Client) Symlink(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpSymlinkPacket{ - ID: id, - Linkpath: newname, - Targetpath: oldname, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// setstat is a convience wrapper to allow for changing of various parts of the file descriptor. -func (c *Client) setstat(path string, flags uint32, attrs interface{}) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpSetstatPacket{ - ID: id, - Path: path, - Flags: flags, - Attrs: attrs, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// Chtimes changes the access and modification times of the named file. -func (c *Client) Chtimes(path string, atime time.Time, mtime time.Time) error { - type times struct { - Atime uint32 - Mtime uint32 - } - attrs := times{uint32(atime.Unix()), uint32(mtime.Unix())} - return c.setstat(path, ssh_FILEXFER_ATTR_ACMODTIME, attrs) -} - -// Chown changes the user and group owners of the named file. -func (c *Client) Chown(path string, uid, gid int) error { - type owner struct { - UID uint32 - GID uint32 - } - attrs := owner{uint32(uid), uint32(gid)} - return c.setstat(path, ssh_FILEXFER_ATTR_UIDGID, attrs) -} - -// Chmod changes the permissions of the named file. -func (c *Client) Chmod(path string, mode os.FileMode) error { - return c.setstat(path, ssh_FILEXFER_ATTR_PERMISSIONS, uint32(mode)) -} - -// Truncate sets the size of the named file. Although it may be safely assumed -// that if the size is less than its current size it will be truncated to fit, -// the SFTP protocol does not specify what behavior the server should do when setting -// size greater than the current size. -func (c *Client) Truncate(path string, size int64) error { - return c.setstat(path, ssh_FILEXFER_ATTR_SIZE, uint64(size)) -} - -// Open opens the named file for reading. If successful, methods on the -// returned file can be used for reading; the associated file descriptor -// has mode O_RDONLY. -func (c *Client) Open(path string) (*File, error) { - return c.open(path, flags(os.O_RDONLY)) -} - -// OpenFile is the generalized open call; most users will use Open or -// Create instead. It opens the named file with specified flag (O_RDONLY -// etc.). If successful, methods on the returned File can be used for I/O. 
-func (c *Client) OpenFile(path string, f int) (*File, error) { - return c.open(path, flags(f)) -} - -func (c *Client) open(path string, pflags uint32) (*File, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpOpenPacket{ - ID: id, - Path: path, - Pflags: pflags, - }) - if err != nil { - return nil, err - } - switch typ { - case ssh_FXP_HANDLE: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - handle, _ := unmarshalString(data) - return &File{c: c, path: path, handle: handle}, nil - case ssh_FXP_STATUS: - return nil, normaliseError(unmarshalStatus(id, data)) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// close closes a handle handle previously returned in the response -// to SSH_FXP_OPEN or SSH_FXP_OPENDIR. The handle becomes invalid -// immediately after this request has been sent. -func (c *Client) close(handle string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpClosePacket{ - ID: id, - Handle: handle, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -func (c *Client) fstat(handle string) (*FileStat, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpFstatPacket{ - ID: id, - Handle: handle, - }) - if err != nil { - return nil, err - } - switch typ { - case ssh_FXP_ATTRS: - sid, data := unmarshalUint32(data) - if sid != id { - return nil, &unexpectedIDErr{id, sid} - } - attr, _ := unmarshalAttrs(data) - return attr, nil - case ssh_FXP_STATUS: - return nil, unmarshalStatus(id, data) - default: - return nil, unimplementedPacketErr(typ) - } -} - -// StatVFS retrieves VFS statistics from a remote host. -// -// It implements the statvfs@openssh.com SSH_FXP_EXTENDED feature -// from http://www.opensource.apple.com/source/OpenSSH/OpenSSH-175/openssh/PROTOCOL?txt. -func (c *Client) StatVFS(path string) (*StatVFS, error) { - // send the StatVFS packet to the server - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpStatvfsPacket{ - ID: id, - Path: path, - }) - if err != nil { - return nil, err - } - - switch typ { - // server responded with valid data - case ssh_FXP_EXTENDED_REPLY: - var response StatVFS - err = binary.Read(bytes.NewReader(data), binary.BigEndian, &response) - if err != nil { - return nil, errors.New("can not parse reply") - } - - return &response, nil - - // the resquest failed - case ssh_FXP_STATUS: - return nil, errors.New(fxp(ssh_FXP_STATUS).String()) - - default: - return nil, unimplementedPacketErr(typ) - } -} - -// Join joins any number of path elements into a single path, adding a -// separating slash if necessary. The result is Cleaned; in particular, all -// empty strings are ignored. -func (c *Client) Join(elem ...string) string { return path.Join(elem...) } - -// Remove removes the specified file or directory. An error will be returned if no -// file or directory with the specified path exists, or if the specified directory -// is not empty. -func (c *Client) Remove(path string) error { - err := c.removeFile(path) - if err, ok := err.(*StatusError); ok { - switch err.Code { - // some servers, *cough* osx *cough*, return EPERM, not ENODIR. 
- // serv-u returns ssh_FX_FILE_IS_A_DIRECTORY - case ssh_FX_PERMISSION_DENIED, ssh_FX_FAILURE, ssh_FX_FILE_IS_A_DIRECTORY: - return c.removeDirectory(path) - } - } - return err -} - -func (c *Client) removeFile(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpRemovePacket{ - ID: id, - Filename: path, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -func (c *Client) removeDirectory(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpRmdirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// Rename renames a file. -func (c *Client) Rename(oldname, newname string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpRenamePacket{ - ID: id, - Oldpath: oldname, - Newpath: newname, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -func (c *Client) realpath(path string) (string, error) { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpRealpathPacket{ - ID: id, - Path: path, - }) - if err != nil { - return "", err - } - switch typ { - case ssh_FXP_NAME: - sid, data := unmarshalUint32(data) - if sid != id { - return "", &unexpectedIDErr{id, sid} - } - count, data := unmarshalUint32(data) - if count != 1 { - return "", unexpectedCount(1, count) - } - filename, _ := unmarshalString(data) // ignore attributes - return filename, nil - case ssh_FXP_STATUS: - return "", normaliseError(unmarshalStatus(id, data)) - default: - return "", unimplementedPacketErr(typ) - } -} - -// Getwd returns the current working directory of the server. Operations -// involving relative paths will be based at this location. -func (c *Client) Getwd() (string, error) { - return c.realpath(".") -} - -// Mkdir creates the specified directory. An error will be returned if a file or -// directory with the specified path already exists, or if the directory's -// parent folder does not exist (the method cannot create complete paths). -func (c *Client) Mkdir(path string) error { - id := c.nextID() - typ, data, err := c.sendPacket(sshFxpMkdirPacket{ - ID: id, - Path: path, - }) - if err != nil { - return err - } - switch typ { - case ssh_FXP_STATUS: - return normaliseError(unmarshalStatus(id, data)) - default: - return unimplementedPacketErr(typ) - } -} - -// applyOptions applies options functions to the Client. -// If an error is encountered, option processing ceases. -func (c *Client) applyOptions(opts ...func(*Client) error) error { - for _, f := range opts { - if err := f(c); err != nil { - return err - } - } - return nil -} - -// File represents a remote file. -type File struct { - c *Client - path string - handle string - offset uint64 // current offset within remote file -} - -// Close closes the File, rendering it unusable for I/O. It returns an -// error, if any. -func (f *File) Close() error { - return f.c.close(f.handle) -} - -// Name returns the name of the file as presented to Open or Create. -func (f *File) Name() string { - return f.path -} - -const maxConcurrentRequests = 64 - -// Read reads up to len(b) bytes from the File. It returns the number of -// bytes read and an error, if any. 
EOF is signaled by a zero count with -// err set to io.EOF. -func (f *File) Read(b []byte) (int, error) { - // Split the read into multiple maxPacket sized concurrent reads - // bounded by maxConcurrentRequests. This allows reads with a suitably - // large buffer to transfer data at a much faster rate due to - // overlapping round trip times. - inFlight := 0 - desiredInFlight := 1 - offset := f.offset - ch := make(chan result, 1) - type inflightRead struct { - b []byte - offset uint64 - } - reqs := map[uint32]inflightRead{} - type offsetErr struct { - offset uint64 - err error - } - var firstErr offsetErr - - sendReq := func(b []byte, offset uint64) { - reqID := f.c.nextID() - f.c.dispatchRequest(ch, sshFxpReadPacket{ - ID: reqID, - Handle: f.handle, - Offset: offset, - Len: uint32(len(b)), - }) - inFlight++ - reqs[reqID] = inflightRead{b: b, offset: offset} - } - - var read int - for len(b) > 0 || inFlight > 0 { - for inFlight < desiredInFlight && len(b) > 0 && firstErr.err == nil { - l := min(len(b), f.c.maxPacket) - rb := b[:l] - sendReq(rb, offset) - offset += uint64(l) - b = b[l:] - } - - if inFlight == 0 { - break - } - select { - case res := <-ch: - inFlight-- - if res.err != nil { - firstErr = offsetErr{offset: 0, err: res.err} - break - } - reqID, data := unmarshalUint32(res.data) - req, ok := reqs[reqID] - if !ok { - firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} - break - } - delete(reqs, reqID) - switch res.typ { - case ssh_FXP_STATUS: - if firstErr.err == nil || req.offset < firstErr.offset { - firstErr = offsetErr{ - offset: req.offset, - err: normaliseError(unmarshalStatus(reqID, res.data)), - } - break - } - case ssh_FXP_DATA: - l, data := unmarshalUint32(data) - n := copy(req.b, data[:l]) - read += n - if n < len(req.b) { - sendReq(req.b[l:], req.offset+uint64(l)) - } - if desiredInFlight < maxConcurrentRequests { - desiredInFlight++ - } - default: - firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} - break - } - } - } - // If the error is anything other than EOF, then there - // may be gaps in the data copied to the buffer so it's - // best to return 0 so the caller can't make any - // incorrect assumptions about the state of the buffer. - if firstErr.err != nil && firstErr.err != io.EOF { - read = 0 - } - f.offset += uint64(read) - return read, firstErr.err -} - -// WriteTo writes the file to w. The return value is the number of bytes -// written. Any error encountered during the write is also returned. 
-func (f *File) WriteTo(w io.Writer) (int64, error) { - fi, err := f.Stat() - if err != nil { - return 0, err - } - inFlight := 0 - desiredInFlight := 1 - offset := f.offset - writeOffset := offset - fileSize := uint64(fi.Size()) - ch := make(chan result, 1) - type inflightRead struct { - b []byte - offset uint64 - } - reqs := map[uint32]inflightRead{} - pendingWrites := map[uint64][]byte{} - type offsetErr struct { - offset uint64 - err error - } - var firstErr offsetErr - - sendReq := func(b []byte, offset uint64) { - reqID := f.c.nextID() - f.c.dispatchRequest(ch, sshFxpReadPacket{ - ID: reqID, - Handle: f.handle, - Offset: offset, - Len: uint32(len(b)), - }) - inFlight++ - reqs[reqID] = inflightRead{b: b, offset: offset} - } - - var copied int64 - for firstErr.err == nil || inFlight > 0 { - for inFlight < desiredInFlight && firstErr.err == nil { - b := make([]byte, f.c.maxPacket) - sendReq(b, offset) - offset += uint64(f.c.maxPacket) - if offset > fileSize { - desiredInFlight = 1 - } - } - - if inFlight == 0 { - break - } - select { - case res := <-ch: - inFlight-- - if res.err != nil { - firstErr = offsetErr{offset: 0, err: res.err} - break - } - reqID, data := unmarshalUint32(res.data) - req, ok := reqs[reqID] - if !ok { - firstErr = offsetErr{offset: 0, err: errors.Errorf("sid: %v not found", reqID)} - break - } - delete(reqs, reqID) - switch res.typ { - case ssh_FXP_STATUS: - if firstErr.err == nil || req.offset < firstErr.offset { - firstErr = offsetErr{offset: req.offset, err: normaliseError(unmarshalStatus(reqID, res.data))} - break - } - case ssh_FXP_DATA: - l, data := unmarshalUint32(data) - if req.offset == writeOffset { - nbytes, err := w.Write(data) - copied += int64(nbytes) - if err != nil { - firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: err} - break - } - if nbytes < int(l) { - firstErr = offsetErr{offset: req.offset + uint64(nbytes), err: io.ErrShortWrite} - break - } - switch { - case offset > fileSize: - desiredInFlight = 1 - case desiredInFlight < maxConcurrentRequests: - desiredInFlight++ - } - writeOffset += uint64(nbytes) - for pendingData, ok := pendingWrites[writeOffset]; ok; pendingData, ok = pendingWrites[writeOffset] { - nbytes, err := w.Write(pendingData) - if err != nil { - firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: err} - break - } - if nbytes < len(pendingData) { - firstErr = offsetErr{offset: writeOffset + uint64(nbytes), err: io.ErrShortWrite} - break - } - writeOffset += uint64(nbytes) - inFlight-- - } - } else { - // Don't write the data yet because - // this response came in out of order - // and we need to wait for responses - // for earlier segments of the file. - inFlight++ // Pending writes should still be considered inFlight. - pendingWrites[req.offset] = data - } - default: - firstErr = offsetErr{offset: 0, err: unimplementedPacketErr(res.typ)} - break - } - } - } - if firstErr.err != io.EOF { - return copied, firstErr.err - } - return copied, nil -} - -// Stat returns the FileInfo structure describing file. If there is an -// error. -func (f *File) Stat() (os.FileInfo, error) { - fs, err := f.c.fstat(f.handle) - if err != nil { - return nil, err - } - return fileInfoFromStat(fs, path.Base(f.path)), nil -} - -// Write writes len(b) bytes to the File. It returns the number of bytes -// written and an error, if any. Write returns a non-nil error when n != -// len(b). 
-func (f *File) Write(b []byte) (int, error) { - // Split the write into multiple maxPacket sized concurrent writes - // bounded by maxConcurrentRequests. This allows writes with a suitably - // large buffer to transfer data at a much faster rate due to - // overlapping round trip times. - inFlight := 0 - desiredInFlight := 1 - offset := f.offset - ch := make(chan result, 1) - var firstErr error - written := len(b) - for len(b) > 0 || inFlight > 0 { - for inFlight < desiredInFlight && len(b) > 0 && firstErr == nil { - l := min(len(b), f.c.maxPacket) - rb := b[:l] - f.c.dispatchRequest(ch, sshFxpWritePacket{ - ID: f.c.nextID(), - Handle: f.handle, - Offset: offset, - Length: uint32(len(rb)), - Data: rb, - }) - inFlight++ - offset += uint64(l) - b = b[l:] - } - - if inFlight == 0 { - break - } - select { - case res := <-ch: - inFlight-- - if res.err != nil { - firstErr = res.err - break - } - switch res.typ { - case ssh_FXP_STATUS: - id, _ := unmarshalUint32(res.data) - err := normaliseError(unmarshalStatus(id, res.data)) - if err != nil && firstErr == nil { - firstErr = err - break - } - if desiredInFlight < maxConcurrentRequests { - desiredInFlight++ - } - default: - firstErr = unimplementedPacketErr(res.typ) - break - } - } - } - // If error is non-nil, then there may be gaps in the data written to - // the file so it's best to return 0 so the caller can't make any - // incorrect assumptions about the state of the file. - if firstErr != nil { - written = 0 - } - f.offset += uint64(written) - return written, firstErr -} - -// ReadFrom reads data from r until EOF and writes it to the file. The return -// value is the number of bytes read. Any error except io.EOF encountered -// during the read is also returned. -func (f *File) ReadFrom(r io.Reader) (int64, error) { - inFlight := 0 - desiredInFlight := 1 - offset := f.offset - ch := make(chan result, 1) - var firstErr error - read := int64(0) - b := make([]byte, f.c.maxPacket) - for inFlight > 0 || firstErr == nil { - for inFlight < desiredInFlight && firstErr == nil { - n, err := r.Read(b) - if err != nil { - firstErr = err - } - f.c.dispatchRequest(ch, sshFxpWritePacket{ - ID: f.c.nextID(), - Handle: f.handle, - Offset: offset, - Length: uint32(n), - Data: b[:n], - }) - inFlight++ - offset += uint64(n) - read += int64(n) - } - - if inFlight == 0 { - break - } - select { - case res := <-ch: - inFlight-- - if res.err != nil { - firstErr = res.err - break - } - switch res.typ { - case ssh_FXP_STATUS: - id, _ := unmarshalUint32(res.data) - err := normaliseError(unmarshalStatus(id, res.data)) - if err != nil && firstErr == nil { - firstErr = err - break - } - if desiredInFlight < maxConcurrentRequests { - desiredInFlight++ - } - default: - firstErr = unimplementedPacketErr(res.typ) - break - } - } - } - if firstErr == io.EOF { - firstErr = nil - } - // If error is non-nil, then there may be gaps in the data written to - // the file so it's best to return 0 so the caller can't make any - // incorrect assumptions about the state of the file. - if firstErr != nil { - read = 0 - } - f.offset += uint64(read) - return read, firstErr -} - -// Seek implements io.Seeker by setting the client offset for the next Read or -// Write. It returns the next offset read. Seeking before or after the end of -// the file is undefined. Seeking relative to the end calls Stat. 
-func (f *File) Seek(offset int64, whence int) (int64, error) { - switch whence { - case os.SEEK_SET: - f.offset = uint64(offset) - case os.SEEK_CUR: - f.offset = uint64(int64(f.offset) + offset) - case os.SEEK_END: - fi, err := f.Stat() - if err != nil { - return int64(f.offset), err - } - f.offset = uint64(fi.Size() + offset) - default: - return int64(f.offset), unimplementedSeekWhence(whence) - } - return int64(f.offset), nil -} - -// Chown changes the uid/gid of the current file. -func (f *File) Chown(uid, gid int) error { - return f.c.Chown(f.path, uid, gid) -} - -// Chmod changes the permissions of the current file. -func (f *File) Chmod(mode os.FileMode) error { - return f.c.Chmod(f.path, mode) -} - -// Truncate sets the size of the current file. Although it may be safely assumed -// that if the size is less than its current size it will be truncated to fit, -// the SFTP protocol does not specify what behavior the server should do when setting -// size greater than the current size. -func (f *File) Truncate(size int64) error { - return f.c.Truncate(f.path, size) -} - -func min(a, b int) int { - if a > b { - return b - } - return a -} - -// normaliseError normalises an error into a more standard form that can be -// checked against stdlib errors like io.EOF or os.ErrNotExist. -func normaliseError(err error) error { - switch err := err.(type) { - case *StatusError: - switch err.Code { - case ssh_FX_EOF: - return io.EOF - case ssh_FX_NO_SUCH_FILE: - return os.ErrNotExist - case ssh_FX_OK: - return nil - default: - return err - } - default: - return err - } -} - -func unmarshalStatus(id uint32, data []byte) error { - sid, data := unmarshalUint32(data) - if sid != id { - return &unexpectedIDErr{id, sid} - } - code, data := unmarshalUint32(data) - msg, data, err := unmarshalStringSafe(data) - if err != nil { - return err - } - lang, _, _ := unmarshalStringSafe(data) - return &StatusError{ - Code: code, - msg: msg, - lang: lang, - } -} - -func marshalStatus(b []byte, err StatusError) []byte { - b = marshalUint32(b, err.Code) - b = marshalString(b, err.msg) - b = marshalString(b, err.lang) - return b -} - -// flags converts the flags passed to OpenFile into ssh flags. -// Unsupported flags are ignored. -func flags(f int) uint32 { - var out uint32 - switch f & os.O_WRONLY { - case os.O_WRONLY: - out |= ssh_FXF_WRITE - case os.O_RDONLY: - out |= ssh_FXF_READ - } - if f&os.O_RDWR == os.O_RDWR { - out |= ssh_FXF_READ | ssh_FXF_WRITE - } - if f&os.O_APPEND == os.O_APPEND { - out |= ssh_FXF_APPEND - } - if f&os.O_CREATE == os.O_CREATE { - out |= ssh_FXF_CREAT - } - if f&os.O_TRUNC == os.O_TRUNC { - out |= ssh_FXF_TRUNC - } - if f&os.O_EXCL == os.O_EXCL { - out |= ssh_FXF_EXCL - } - return out -} diff --git a/vendor/github.com/pkg/sftp/conn.go b/vendor/github.com/pkg/sftp/conn.go deleted file mode 100644 index 9b03d112f..000000000 --- a/vendor/github.com/pkg/sftp/conn.go +++ /dev/null @@ -1,122 +0,0 @@ -package sftp - -import ( - "encoding" - "io" - "sync" - - "github.com/pkg/errors" -) - -// conn implements a bidirectional channel on which client and server -// connections are multiplexed. 
-type conn struct { - io.Reader - io.WriteCloser - sync.Mutex // used to serialise writes to sendPacket -} - -func (c *conn) recvPacket() (uint8, []byte, error) { - return recvPacket(c) -} - -func (c *conn) sendPacket(m encoding.BinaryMarshaler) error { - c.Lock() - defer c.Unlock() - return sendPacket(c, m) -} - -type clientConn struct { - conn - wg sync.WaitGroup - sync.Mutex // protects inflight - inflight map[uint32]chan<- result // outstanding requests -} - -// Close closes the SFTP session. -func (c *clientConn) Close() error { - defer c.wg.Wait() - return c.conn.Close() -} - -func (c *clientConn) loop() { - defer c.wg.Done() - err := c.recv() - if err != nil { - c.broadcastErr(err) - } -} - -// recv continuously reads from the server and forwards responses to the -// appropriate channel. -func (c *clientConn) recv() error { - defer c.conn.Close() - for { - typ, data, err := c.recvPacket() - if err != nil { - return err - } - sid, _ := unmarshalUint32(data) - c.Lock() - ch, ok := c.inflight[sid] - delete(c.inflight, sid) - c.Unlock() - if !ok { - // This is an unexpected occurrence. Send the error - // back to all listeners so that they terminate - // gracefully. - return errors.Errorf("sid: %v not fond", sid) - } - ch <- result{typ: typ, data: data} - } -} - -// result captures the result of receiving the a packet from the server -type result struct { - typ byte - data []byte - err error -} - -type idmarshaler interface { - id() uint32 - encoding.BinaryMarshaler -} - -func (c *clientConn) sendPacket(p idmarshaler) (byte, []byte, error) { - ch := make(chan result, 1) - c.dispatchRequest(ch, p) - s := <-ch - return s.typ, s.data, s.err -} - -func (c *clientConn) dispatchRequest(ch chan<- result, p idmarshaler) { - c.Lock() - c.inflight[p.id()] = ch - if err := c.conn.sendPacket(p); err != nil { - delete(c.inflight, p.id()) - ch <- result{err: err} - } - c.Unlock() -} - -// broadcastErr sends an error to all goroutines waiting for a response. -func (c *clientConn) broadcastErr(err error) { - c.Lock() - listeners := make([]chan<- result, 0, len(c.inflight)) - for _, ch := range c.inflight { - listeners = append(listeners, ch) - } - c.Unlock() - for _, ch := range listeners { - ch <- result{err: err} - } -} - -type serverConn struct { - conn -} - -func (s *serverConn) sendError(p id, err error) error { - return s.sendPacket(statusFromError(p, err)) -} diff --git a/vendor/github.com/pkg/sftp/debug.go b/vendor/github.com/pkg/sftp/debug.go deleted file mode 100644 index 3e264abe3..000000000 --- a/vendor/github.com/pkg/sftp/debug.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build debug - -package sftp - -import "log" - -func debug(fmt string, args ...interface{}) { - log.Printf(fmt, args...) 
-} diff --git a/vendor/github.com/pkg/sftp/packet.go b/vendor/github.com/pkg/sftp/packet.go deleted file mode 100644 index fba415995..000000000 --- a/vendor/github.com/pkg/sftp/packet.go +++ /dev/null @@ -1,901 +0,0 @@ -package sftp - -import ( - "bytes" - "encoding" - "encoding/binary" - "fmt" - "io" - "os" - "reflect" - - "github.com/pkg/errors" -) - -var ( - errShortPacket = errors.New("packet too short") - errUnknownExtendedPacket = errors.New("unknown extended packet") -) - -const ( - debugDumpTxPacket = false - debugDumpRxPacket = false - debugDumpTxPacketBytes = false - debugDumpRxPacketBytes = false -) - -func marshalUint32(b []byte, v uint32) []byte { - return append(b, byte(v>>24), byte(v>>16), byte(v>>8), byte(v)) -} - -func marshalUint64(b []byte, v uint64) []byte { - return marshalUint32(marshalUint32(b, uint32(v>>32)), uint32(v)) -} - -func marshalString(b []byte, v string) []byte { - return append(marshalUint32(b, uint32(len(v))), v...) -} - -func marshal(b []byte, v interface{}) []byte { - if v == nil { - return b - } - switch v := v.(type) { - case uint8: - return append(b, v) - case uint32: - return marshalUint32(b, v) - case uint64: - return marshalUint64(b, v) - case string: - return marshalString(b, v) - case os.FileInfo: - return marshalFileInfo(b, v) - default: - switch d := reflect.ValueOf(v); d.Kind() { - case reflect.Struct: - for i, n := 0, d.NumField(); i < n; i++ { - b = append(marshal(b, d.Field(i).Interface())) - } - return b - case reflect.Slice: - for i, n := 0, d.Len(); i < n; i++ { - b = append(marshal(b, d.Index(i).Interface())) - } - return b - default: - panic(fmt.Sprintf("marshal(%#v): cannot handle type %T", v, v)) - } - } -} - -func unmarshalUint32(b []byte) (uint32, []byte) { - v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 - return v, b[4:] -} - -func unmarshalUint32Safe(b []byte) (uint32, []byte, error) { - var v uint32 - if len(b) < 4 { - return 0, nil, errShortPacket - } - v, b = unmarshalUint32(b) - return v, b, nil -} - -func unmarshalUint64(b []byte) (uint64, []byte) { - h, b := unmarshalUint32(b) - l, b := unmarshalUint32(b) - return uint64(h)<<32 | uint64(l), b -} - -func unmarshalUint64Safe(b []byte) (uint64, []byte, error) { - var v uint64 - if len(b) < 8 { - return 0, nil, errShortPacket - } - v, b = unmarshalUint64(b) - return v, b, nil -} - -func unmarshalString(b []byte) (string, []byte) { - n, b := unmarshalUint32(b) - return string(b[:n]), b[n:] -} - -func unmarshalStringSafe(b []byte) (string, []byte, error) { - n, b, err := unmarshalUint32Safe(b) - if err != nil { - return "", nil, err - } - if int64(n) > int64(len(b)) { - return "", nil, errShortPacket - } - return string(b[:n]), b[n:], nil -} - -// sendPacket marshals p according to RFC 4234. 
-func sendPacket(w io.Writer, m encoding.BinaryMarshaler) error { - bb, err := m.MarshalBinary() - if err != nil { - return errors.Errorf("binary marshaller failed: %v", err) - } - if debugDumpTxPacketBytes { - debug("send packet: %s %d bytes %x", fxp(bb[0]), len(bb), bb[1:]) - } else if debugDumpTxPacket { - debug("send packet: %s %d bytes", fxp(bb[0]), len(bb)) - } - l := uint32(len(bb)) - hdr := []byte{byte(l >> 24), byte(l >> 16), byte(l >> 8), byte(l)} - _, err = w.Write(hdr) - if err != nil { - return errors.Errorf("failed to send packet header: %v", err) - } - _, err = w.Write(bb) - if err != nil { - return errors.Errorf("failed to send packet body: %v", err) - } - return nil -} - -func recvPacket(r io.Reader) (uint8, []byte, error) { - var b = []byte{0, 0, 0, 0} - if _, err := io.ReadFull(r, b); err != nil { - return 0, nil, err - } - l, _ := unmarshalUint32(b) - b = make([]byte, l) - if _, err := io.ReadFull(r, b); err != nil { - debug("recv packet %d bytes: err %v", l, err) - return 0, nil, err - } - if debugDumpRxPacketBytes { - debug("recv packet: %s %d bytes %x", fxp(b[0]), l, b[1:]) - } else if debugDumpRxPacket { - debug("recv packet: %s %d bytes", fxp(b[0]), l) - } - return b[0], b[1:], nil -} - -type extensionPair struct { - Name string - Data string -} - -func unmarshalExtensionPair(b []byte) (extensionPair, []byte, error) { - var ep extensionPair - var err error - ep.Name, b, err = unmarshalStringSafe(b) - if err != nil { - return ep, b, err - } - ep.Data, b, err = unmarshalStringSafe(b) - if err != nil { - return ep, b, err - } - return ep, b, err -} - -// Here starts the definition of packets along with their MarshalBinary -// implementations. -// Manually writing the marshalling logic wins us a lot of time and -// allocation. - -type sshFxInitPacket struct { - Version uint32 - Extensions []extensionPair -} - -func (p sshFxInitPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 // byte + uint32 - for _, e := range p.Extensions { - l += 4 + len(e.Name) + 4 + len(e.Data) - } - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_INIT) - b = marshalUint32(b, p.Version) - for _, e := range p.Extensions { - b = marshalString(b, e.Name) - b = marshalString(b, e.Data) - } - return b, nil -} - -func (p *sshFxInitPacket) UnmarshalBinary(b []byte) error { - var err error - if p.Version, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - for len(b) > 0 { - var ep extensionPair - ep, b, err = unmarshalExtensionPair(b) - if err != nil { - return err - } - p.Extensions = append(p.Extensions, ep) - } - return nil -} - -type sshFxVersionPacket struct { - Version uint32 - Extensions []struct { - Name, Data string - } -} - -func (p sshFxVersionPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 // byte + uint32 - for _, e := range p.Extensions { - l += 4 + len(e.Name) + 4 + len(e.Data) - } - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_VERSION) - b = marshalUint32(b, p.Version) - for _, e := range p.Extensions { - b = marshalString(b, e.Name) - b = marshalString(b, e.Data) - } - return b, nil -} - -func marshalIDString(packetType byte, id uint32, str string) ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(str) - - b := make([]byte, 0, l) - b = append(b, packetType) - b = marshalUint32(b, id) - b = marshalString(b, str) - return b, nil -} - -func unmarshalIDString(b []byte, id *uint32, str *string) error { - var err error - *id, b, err = unmarshalUint32Safe(b) - if err != nil { - return err - } - *str, b, err = unmarshalStringSafe(b) - return err 
-} - -type sshFxpReaddirPacket struct { - ID uint32 - Handle string -} - -func (p sshFxpReaddirPacket) id() uint32 { return p.ID } - -func (p sshFxpReaddirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_READDIR, p.ID, p.Handle) -} - -func (p *sshFxpReaddirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpOpendirPacket struct { - ID uint32 - Path string -} - -func (p sshFxpOpendirPacket) id() uint32 { return p.ID } - -func (p sshFxpOpendirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_OPENDIR, p.ID, p.Path) -} - -func (p *sshFxpOpendirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpLstatPacket struct { - ID uint32 - Path string -} - -func (p sshFxpLstatPacket) id() uint32 { return p.ID } - -func (p sshFxpLstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_LSTAT, p.ID, p.Path) -} - -func (p *sshFxpLstatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpStatPacket struct { - ID uint32 - Path string -} - -func (p sshFxpStatPacket) id() uint32 { return p.ID } - -func (p sshFxpStatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_STAT, p.ID, p.Path) -} - -func (p *sshFxpStatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpFstatPacket struct { - ID uint32 - Handle string -} - -func (p sshFxpFstatPacket) id() uint32 { return p.ID } - -func (p sshFxpFstatPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_FSTAT, p.ID, p.Handle) -} - -func (p *sshFxpFstatPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpClosePacket struct { - ID uint32 - Handle string -} - -func (p sshFxpClosePacket) id() uint32 { return p.ID } - -func (p sshFxpClosePacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_CLOSE, p.ID, p.Handle) -} - -func (p *sshFxpClosePacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Handle) -} - -type sshFxpRemovePacket struct { - ID uint32 - Filename string -} - -func (p sshFxpRemovePacket) id() uint32 { return p.ID } - -func (p sshFxpRemovePacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_REMOVE, p.ID, p.Filename) -} - -func (p *sshFxpRemovePacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Filename) -} - -type sshFxpRmdirPacket struct { - ID uint32 - Path string -} - -func (p sshFxpRmdirPacket) id() uint32 { return p.ID } - -func (p sshFxpRmdirPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_RMDIR, p.ID, p.Path) -} - -func (p *sshFxpRmdirPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpSymlinkPacket struct { - ID uint32 - Targetpath string - Linkpath string -} - -func (p sshFxpSymlinkPacket) id() uint32 { return p.ID } - -func (p sshFxpSymlinkPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Targetpath) + - 4 + len(p.Linkpath) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_SYMLINK) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Targetpath) - b = marshalString(b, p.Linkpath) - return b, nil -} - -func (p *sshFxpSymlinkPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else 
if p.Targetpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Linkpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpReadlinkPacket struct { - ID uint32 - Path string -} - -func (p sshFxpReadlinkPacket) id() uint32 { return p.ID } - -func (p sshFxpReadlinkPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_READLINK, p.ID, p.Path) -} - -func (p *sshFxpReadlinkPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpRealpathPacket struct { - ID uint32 - Path string -} - -func (p sshFxpRealpathPacket) id() uint32 { return p.ID } - -func (p sshFxpRealpathPacket) MarshalBinary() ([]byte, error) { - return marshalIDString(ssh_FXP_REALPATH, p.ID, p.Path) -} - -func (p *sshFxpRealpathPacket) UnmarshalBinary(b []byte) error { - return unmarshalIDString(b, &p.ID, &p.Path) -} - -type sshFxpNameAttr struct { - Name string - LongName string - Attrs []interface{} -} - -func (p sshFxpNameAttr) MarshalBinary() ([]byte, error) { - b := []byte{} - b = marshalString(b, p.Name) - b = marshalString(b, p.LongName) - for _, attr := range p.Attrs { - b = marshal(b, attr) - } - return b, nil -} - -type sshFxpNamePacket struct { - ID uint32 - NameAttrs []sshFxpNameAttr -} - -func (p sshFxpNamePacket) MarshalBinary() ([]byte, error) { - b := []byte{} - b = append(b, ssh_FXP_NAME) - b = marshalUint32(b, p.ID) - b = marshalUint32(b, uint32(len(p.NameAttrs))) - for _, na := range p.NameAttrs { - ab, err := na.MarshalBinary() - if err != nil { - return nil, err - } - - b = append(b, ab...) - } - return b, nil -} - -type sshFxpOpenPacket struct { - ID uint32 - Path string - Pflags uint32 - Flags uint32 // ignored -} - -func (p sshFxpOpenPacket) id() uint32 { return p.ID } - -func (p sshFxpOpenPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + - 4 + len(p.Path) + - 4 + 4 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_OPEN) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Pflags) - b = marshalUint32(b, p.Flags) - return b, nil -} - -func (p *sshFxpOpenPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Pflags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpReadPacket struct { - ID uint32 - Handle string - Offset uint64 - Len uint32 -} - -func (p sshFxpReadPacket) id() uint32 { return p.ID } - -func (p sshFxpReadPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Handle) + - 8 + 4 // uint64 + uint32 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_READ) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint64(b, p.Offset) - b = marshalUint32(b, p.Len) - return b, nil -} - -func (p *sshFxpReadPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { - return err - } else if p.Len, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpRenamePacket struct { - ID uint32 - Oldpath string - Newpath string -} - -func (p 
sshFxpRenamePacket) id() uint32 { return p.ID } - -func (p sshFxpRenamePacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Oldpath) + - 4 + len(p.Newpath) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_RENAME) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Oldpath) - b = marshalString(b, p.Newpath) - return b, nil -} - -func (p *sshFxpRenamePacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Oldpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Newpath, b, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} - -type sshFxpWritePacket struct { - ID uint32 - Handle string - Offset uint64 - Length uint32 - Data []byte -} - -func (p sshFxpWritePacket) id() uint32 { return p.ID } - -func (p sshFxpWritePacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Handle) + - 8 + 4 + // uint64 + uint32 - len(p.Data) - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_WRITE) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint64(b, p.Offset) - b = marshalUint32(b, p.Length) - b = append(b, p.Data...) - return b, nil -} - -func (p *sshFxpWritePacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Offset, b, err = unmarshalUint64Safe(b); err != nil { - return err - } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if uint32(len(b)) < p.Length { - return errShortPacket - } - - p.Data = append([]byte{}, b[:p.Length]...) 
- return nil -} - -type sshFxpMkdirPacket struct { - ID uint32 - Path string - Flags uint32 // ignored -} - -func (p sshFxpMkdirPacket) id() uint32 { return p.ID } - -func (p sshFxpMkdirPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Path) + - 4 // uint32 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_MKDIR) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Flags) - return b, nil -} - -func (p *sshFxpMkdirPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - return nil -} - -type sshFxpSetstatPacket struct { - ID uint32 - Path string - Flags uint32 - Attrs interface{} -} - -type sshFxpFsetstatPacket struct { - ID uint32 - Handle string - Flags uint32 - Attrs interface{} -} - -func (p sshFxpSetstatPacket) id() uint32 { return p.ID } -func (p sshFxpFsetstatPacket) id() uint32 { return p.ID } - -func (p sshFxpSetstatPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Path) + - 4 // uint32 + uint64 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_SETSTAT) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Path) - b = marshalUint32(b, p.Flags) - b = marshal(b, p.Attrs) - return b, nil -} - -func (p sshFxpFsetstatPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - 4 + len(p.Handle) + - 4 // uint32 + uint64 - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_FSETSTAT) - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - b = marshalUint32(b, p.Flags) - b = marshal(b, p.Attrs) - return b, nil -} - -func (p *sshFxpSetstatPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - p.Attrs = b - return nil -} - -func (p *sshFxpFsetstatPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Handle, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Flags, b, err = unmarshalUint32Safe(b); err != nil { - return err - } - p.Attrs = b - return nil -} - -type sshFxpHandlePacket struct { - ID uint32 - Handle string -} - -func (p sshFxpHandlePacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_HANDLE} - b = marshalUint32(b, p.ID) - b = marshalString(b, p.Handle) - return b, nil -} - -type sshFxpStatusPacket struct { - ID uint32 - StatusError -} - -func (p sshFxpStatusPacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_STATUS} - b = marshalUint32(b, p.ID) - b = marshalStatus(b, p.StatusError) - return b, nil -} - -type sshFxpDataPacket struct { - ID uint32 - Length uint32 - Data []byte -} - -func (p sshFxpDataPacket) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_DATA} - b = marshalUint32(b, p.ID) - b = marshalUint32(b, p.Length) - b = append(b, p.Data[:p.Length]...) 
- return b, nil -} - -func (p *sshFxpDataPacket) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.Length, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if uint32(len(b)) < p.Length { - return errors.New("truncated packet") - } - - p.Data = make([]byte, p.Length) - copy(p.Data, b) - return nil -} - -type sshFxpStatvfsPacket struct { - ID uint32 - Path string -} - -func (p sshFxpStatvfsPacket) id() uint32 { return p.ID } - -func (p sshFxpStatvfsPacket) MarshalBinary() ([]byte, error) { - l := 1 + 4 + // type(byte) + uint32 - len(p.Path) + - len("statvfs@openssh.com") - - b := make([]byte, 0, l) - b = append(b, ssh_FXP_EXTENDED) - b = marshalUint32(b, p.ID) - b = marshalString(b, "statvfs@openssh.com") - b = marshalString(b, p.Path) - return b, nil -} - -// A StatVFS contains statistics about a filesystem. -type StatVFS struct { - ID uint32 - Bsize uint64 /* file system block size */ - Frsize uint64 /* fundamental fs block size */ - Blocks uint64 /* number of blocks (unit f_frsize) */ - Bfree uint64 /* free blocks in file system */ - Bavail uint64 /* free blocks for non-root */ - Files uint64 /* total file inodes */ - Ffree uint64 /* free file inodes */ - Favail uint64 /* free file inodes for to non-root */ - Fsid uint64 /* file system id */ - Flag uint64 /* bit mask of f_flag values */ - Namemax uint64 /* maximum filename length */ -} - -// TotalSpace calculates the amount of total space in a filesystem. -func (p *StatVFS) TotalSpace() uint64 { - return p.Frsize * p.Blocks -} - -// FreeSpace calculates the amount of free space in a filesystem. -func (p *StatVFS) FreeSpace() uint64 { - return p.Frsize * p.Bfree -} - -// Convert to ssh_FXP_EXTENDED_REPLY packet binary format -func (p *StatVFS) MarshalBinary() ([]byte, error) { - var buf bytes.Buffer - buf.Write([]byte{ssh_FXP_EXTENDED_REPLY}) - err := binary.Write(&buf, binary.BigEndian, p) - return buf.Bytes(), err -} - -type sshFxpExtendedPacket struct { - ID uint32 - ExtendedRequest string - SpecificPacket interface { - serverRespondablePacket - readonly() bool - } -} - -func (p sshFxpExtendedPacket) id() uint32 { return p.ID } -func (p sshFxpExtendedPacket) readonly() bool { return p.SpecificPacket.readonly() } - -func (p sshFxpExtendedPacket) respond(svr *Server) error { - return p.SpecificPacket.respond(svr) -} - -func (p *sshFxpExtendedPacket) UnmarshalBinary(b []byte) error { - var err error - bOrig := b - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } - - // specific unmarshalling - switch p.ExtendedRequest { - case "statvfs@openssh.com": - p.SpecificPacket = &sshFxpExtendedPacketStatVFS{} - default: - return errUnknownExtendedPacket - } - - return p.SpecificPacket.UnmarshalBinary(bOrig) -} - -type sshFxpExtendedPacketStatVFS struct { - ID uint32 - ExtendedRequest string - Path string -} - -func (p sshFxpExtendedPacketStatVFS) id() uint32 { return p.ID } -func (p sshFxpExtendedPacketStatVFS) readonly() bool { return true } -func (p *sshFxpExtendedPacketStatVFS) UnmarshalBinary(b []byte) error { - var err error - if p.ID, b, err = unmarshalUint32Safe(b); err != nil { - return err - } else if p.ExtendedRequest, b, err = unmarshalStringSafe(b); err != nil { - return err - } else if p.Path, b, err = unmarshalStringSafe(b); err != nil { - return err - } - return nil -} diff --git 
a/vendor/github.com/pkg/sftp/release.go b/vendor/github.com/pkg/sftp/release.go deleted file mode 100644 index b695528fd..000000000 --- a/vendor/github.com/pkg/sftp/release.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !debug - -package sftp - -func debug(fmt string, args ...interface{}) {} diff --git a/vendor/github.com/pkg/sftp/server.go b/vendor/github.com/pkg/sftp/server.go deleted file mode 100644 index e097bda30..000000000 --- a/vendor/github.com/pkg/sftp/server.go +++ /dev/null @@ -1,607 +0,0 @@ -package sftp - -// sftp server counterpart - -import ( - "encoding" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "sync" - "syscall" - "time" - - "github.com/pkg/errors" -) - -const ( - sftpServerWorkerCount = 8 -) - -// Server is an SSH File Transfer Protocol (sftp) server. -// This is intended to provide the sftp subsystem to an ssh server daemon. -// This implementation currently supports most of sftp server protocol version 3, -// as specified at http://tools.ietf.org/html/draft-ietf-secsh-filexfer-02 -type Server struct { - serverConn - debugStream io.Writer - readOnly bool - pktChan chan rxPacket - openFiles map[string]*os.File - openFilesLock sync.RWMutex - handleCount int - maxTxPacket uint32 -} - -func (svr *Server) nextHandle(f *os.File) string { - svr.openFilesLock.Lock() - defer svr.openFilesLock.Unlock() - svr.handleCount++ - handle := strconv.Itoa(svr.handleCount) - svr.openFiles[handle] = f - return handle -} - -func (svr *Server) closeHandle(handle string) error { - svr.openFilesLock.Lock() - defer svr.openFilesLock.Unlock() - if f, ok := svr.openFiles[handle]; ok { - delete(svr.openFiles, handle) - return f.Close() - } - - return syscall.EBADF -} - -func (svr *Server) getHandle(handle string) (*os.File, bool) { - svr.openFilesLock.RLock() - defer svr.openFilesLock.RUnlock() - f, ok := svr.openFiles[handle] - return f, ok -} - -type serverRespondablePacket interface { - encoding.BinaryUnmarshaler - id() uint32 - respond(svr *Server) error -} - -// NewServer creates a new Server instance around the provided streams, serving -// content from the root of the filesystem. Optionally, ServerOption -// functions may be specified to further configure the Server. -// -// A subsequent call to Serve() is required to begin serving files over SFTP. -func NewServer(rwc io.ReadWriteCloser, options ...ServerOption) (*Server, error) { - s := &Server{ - serverConn: serverConn{ - conn: conn{ - Reader: rwc, - WriteCloser: rwc, - }, - }, - debugStream: ioutil.Discard, - pktChan: make(chan rxPacket, sftpServerWorkerCount), - openFiles: make(map[string]*os.File), - maxTxPacket: 1 << 15, - } - - for _, o := range options { - if err := o(s); err != nil { - return nil, err - } - } - - return s, nil -} - -// A ServerOption is a function which applies configuration to a Server. -type ServerOption func(*Server) error - -// WithDebug enables Server debugging output to the supplied io.Writer. -func WithDebug(w io.Writer) ServerOption { - return func(s *Server) error { - s.debugStream = w - return nil - } -} - -// ReadOnly configures a Server to serve files in read-only mode. 
-func ReadOnly() ServerOption { - return func(s *Server) error { - s.readOnly = true - return nil - } -} - -type rxPacket struct { - pktType fxp - pktBytes []byte -} - -// Up to N parallel servers -func (svr *Server) sftpServerWorker() error { - for p := range svr.pktChan { - var pkt interface { - encoding.BinaryUnmarshaler - id() uint32 - } - var readonly = true - switch p.pktType { - case ssh_FXP_INIT: - pkt = &sshFxInitPacket{} - case ssh_FXP_LSTAT: - pkt = &sshFxpLstatPacket{} - case ssh_FXP_OPEN: - pkt = &sshFxpOpenPacket{} - // readonly handled specially below - case ssh_FXP_CLOSE: - pkt = &sshFxpClosePacket{} - case ssh_FXP_READ: - pkt = &sshFxpReadPacket{} - case ssh_FXP_WRITE: - pkt = &sshFxpWritePacket{} - readonly = false - case ssh_FXP_FSTAT: - pkt = &sshFxpFstatPacket{} - case ssh_FXP_SETSTAT: - pkt = &sshFxpSetstatPacket{} - readonly = false - case ssh_FXP_FSETSTAT: - pkt = &sshFxpFsetstatPacket{} - readonly = false - case ssh_FXP_OPENDIR: - pkt = &sshFxpOpendirPacket{} - case ssh_FXP_READDIR: - pkt = &sshFxpReaddirPacket{} - case ssh_FXP_REMOVE: - pkt = &sshFxpRemovePacket{} - readonly = false - case ssh_FXP_MKDIR: - pkt = &sshFxpMkdirPacket{} - readonly = false - case ssh_FXP_RMDIR: - pkt = &sshFxpRmdirPacket{} - readonly = false - case ssh_FXP_REALPATH: - pkt = &sshFxpRealpathPacket{} - case ssh_FXP_STAT: - pkt = &sshFxpStatPacket{} - case ssh_FXP_RENAME: - pkt = &sshFxpRenamePacket{} - readonly = false - case ssh_FXP_READLINK: - pkt = &sshFxpReadlinkPacket{} - case ssh_FXP_SYMLINK: - pkt = &sshFxpSymlinkPacket{} - readonly = false - case ssh_FXP_EXTENDED: - pkt = &sshFxpExtendedPacket{} - default: - return errors.Errorf("unhandled packet type: %s", p.pktType) - } - if err := pkt.UnmarshalBinary(p.pktBytes); err != nil { - return err - } - - // handle FXP_OPENDIR specially - switch pkt := pkt.(type) { - case *sshFxpOpenPacket: - readonly = pkt.readonly() - case *sshFxpExtendedPacket: - readonly = pkt.SpecificPacket.readonly() - } - - // If server is operating read-only and a write operation is requested, - // return permission denied - if !readonly && svr.readOnly { - if err := svr.sendError(pkt, syscall.EPERM); err != nil { - return errors.Wrap(err, "failed to send read only packet response") - } - continue - } - - if err := handlePacket(svr, pkt); err != nil { - return err - } - } - return nil -} - -func handlePacket(s *Server, p interface{}) error { - switch p := p.(type) { - case *sshFxInitPacket: - return s.sendPacket(sshFxVersionPacket{sftpProtocolVersion, nil}) - case *sshFxpStatPacket: - // stat the requested file - info, err := os.Stat(p.Path) - if err != nil { - return s.sendError(p, err) - } - return s.sendPacket(sshFxpStatResponse{ - ID: p.ID, - info: info, - }) - case *sshFxpLstatPacket: - // stat the requested file - info, err := os.Lstat(p.Path) - if err != nil { - return s.sendError(p, err) - } - return s.sendPacket(sshFxpStatResponse{ - ID: p.ID, - info: info, - }) - case *sshFxpFstatPacket: - f, ok := s.getHandle(p.Handle) - if !ok { - return s.sendError(p, syscall.EBADF) - } - - info, err := f.Stat() - if err != nil { - return s.sendError(p, err) - } - - return s.sendPacket(sshFxpStatResponse{ - ID: p.ID, - info: info, - }) - case *sshFxpMkdirPacket: - // TODO FIXME: ignore flags field - err := os.Mkdir(p.Path, 0755) - return s.sendError(p, err) - case *sshFxpRmdirPacket: - err := os.Remove(p.Path) - return s.sendError(p, err) - case *sshFxpRemovePacket: - err := os.Remove(p.Filename) - return s.sendError(p, err) - case *sshFxpRenamePacket: - err := 
os.Rename(p.Oldpath, p.Newpath) - return s.sendError(p, err) - case *sshFxpSymlinkPacket: - err := os.Symlink(p.Targetpath, p.Linkpath) - return s.sendError(p, err) - case *sshFxpClosePacket: - return s.sendError(p, s.closeHandle(p.Handle)) - case *sshFxpReadlinkPacket: - f, err := os.Readlink(p.Path) - if err != nil { - return s.sendError(p, err) - } - - return s.sendPacket(sshFxpNamePacket{ - ID: p.ID, - NameAttrs: []sshFxpNameAttr{{ - Name: f, - LongName: f, - Attrs: emptyFileStat, - }}, - }) - - case *sshFxpRealpathPacket: - f, err := filepath.Abs(p.Path) - if err != nil { - return s.sendError(p, err) - } - f = filepath.Clean(f) - return s.sendPacket(sshFxpNamePacket{ - ID: p.ID, - NameAttrs: []sshFxpNameAttr{{ - Name: f, - LongName: f, - Attrs: emptyFileStat, - }}, - }) - case *sshFxpOpendirPacket: - return sshFxpOpenPacket{ - ID: p.ID, - Path: p.Path, - Pflags: ssh_FXF_READ, - }.respond(s) - case *sshFxpReadPacket: - f, ok := s.getHandle(p.Handle) - if !ok { - return s.sendError(p, syscall.EBADF) - } - - data := make([]byte, clamp(p.Len, s.maxTxPacket)) - n, err := f.ReadAt(data, int64(p.Offset)) - if err != nil && (err != io.EOF || n == 0) { - return s.sendError(p, err) - } - return s.sendPacket(sshFxpDataPacket{ - ID: p.ID, - Length: uint32(n), - Data: data[:n], - }) - case *sshFxpWritePacket: - f, ok := s.getHandle(p.Handle) - if !ok { - return s.sendError(p, syscall.EBADF) - } - - _, err := f.WriteAt(p.Data, int64(p.Offset)) - return s.sendError(p, err) - case serverRespondablePacket: - err := p.respond(s) - return errors.Wrap(err, "pkt.respond failed") - default: - return errors.Errorf("unexpected packet type %T", p) - } -} - -// Serve serves SFTP connections until the streams stop or the SFTP subsystem -// is stopped. -func (svr *Server) Serve() error { - var wg sync.WaitGroup - wg.Add(sftpServerWorkerCount) - for i := 0; i < sftpServerWorkerCount; i++ { - go func() { - defer wg.Done() - if err := svr.sftpServerWorker(); err != nil { - svr.conn.Close() // shuts down recvPacket - } - }() - } - - var err error - var pktType uint8 - var pktBytes []byte - for { - pktType, pktBytes, err = svr.recvPacket() - if err != nil { - break - } - svr.pktChan <- rxPacket{fxp(pktType), pktBytes} - } - - close(svr.pktChan) // shuts down sftpServerWorkers - wg.Wait() // wait for all workers to exit - - // close any still-open files - for handle, file := range svr.openFiles { - fmt.Fprintf(svr.debugStream, "sftp server file with handle %q left open: %v\n", handle, file.Name()) - file.Close() - } - return err // error from recvPacket -} - -type id interface { - id() uint32 -} - -// The init packet has no ID, so we just return a zero-value ID -func (p sshFxInitPacket) id() uint32 { return 0 } - -type sshFxpStatResponse struct { - ID uint32 - info os.FileInfo -} - -func (p sshFxpStatResponse) MarshalBinary() ([]byte, error) { - b := []byte{ssh_FXP_ATTRS} - b = marshalUint32(b, p.ID) - b = marshalFileInfo(b, p.info) - return b, nil -} - -var emptyFileStat = []interface{}{uint32(0)} - -func (p sshFxpOpenPacket) readonly() bool { - return !p.hasPflags(ssh_FXF_WRITE) -} - -func (p sshFxpOpenPacket) hasPflags(flags ...uint32) bool { - for _, f := range flags { - if p.Pflags&f == 0 { - return false - } - } - return true -} - -func (p sshFxpOpenPacket) respond(svr *Server) error { - var osFlags int - if p.hasPflags(ssh_FXF_READ, ssh_FXF_WRITE) { - osFlags |= os.O_RDWR - } else if p.hasPflags(ssh_FXF_WRITE) { - osFlags |= os.O_WRONLY - } else if p.hasPflags(ssh_FXF_READ) { - osFlags |= os.O_RDONLY - } else { 
- // how are they opening? - return svr.sendError(p, syscall.EINVAL) - } - - if p.hasPflags(ssh_FXF_APPEND) { - osFlags |= os.O_APPEND - } - if p.hasPflags(ssh_FXF_CREAT) { - osFlags |= os.O_CREATE - } - if p.hasPflags(ssh_FXF_TRUNC) { - osFlags |= os.O_TRUNC - } - if p.hasPflags(ssh_FXF_EXCL) { - osFlags |= os.O_EXCL - } - - f, err := os.OpenFile(p.Path, osFlags, 0644) - if err != nil { - return svr.sendError(p, err) - } - - handle := svr.nextHandle(f) - return svr.sendPacket(sshFxpHandlePacket{p.ID, handle}) -} - -func (p sshFxpReaddirPacket) respond(svr *Server) error { - f, ok := svr.getHandle(p.Handle) - if !ok { - return svr.sendError(p, syscall.EBADF) - } - - dirname := f.Name() - dirents, err := f.Readdir(128) - if err != nil { - return svr.sendError(p, err) - } - - ret := sshFxpNamePacket{ID: p.ID} - for _, dirent := range dirents { - ret.NameAttrs = append(ret.NameAttrs, sshFxpNameAttr{ - Name: dirent.Name(), - LongName: runLs(dirname, dirent), - Attrs: []interface{}{dirent}, - }) - } - return svr.sendPacket(ret) -} - -func (p sshFxpSetstatPacket) respond(svr *Server) error { - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error - - debug("setstat name \"%s\"", p.Path) - if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = os.Truncate(p.Path, int64(size)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = os.Chmod(p.Path, os.FileMode(mode)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(p.Path, atimeT, mtimeT) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, b, err = unmarshalUint32Safe(b); err != nil { - } else { - err = os.Chown(p.Path, int(uid), int(gid)) - } - } - - return svr.sendError(p, err) -} - -func (p sshFxpFsetstatPacket) respond(svr *Server) error { - f, ok := svr.getHandle(p.Handle) - if !ok { - return svr.sendError(p, syscall.EBADF) - } - - // additional unmarshalling is required for each possibility here - b := p.Attrs.([]byte) - var err error - - debug("fsetstat name \"%s\"", f.Name()) - if (p.Flags & ssh_FILEXFER_ATTR_SIZE) != 0 { - var size uint64 - if size, b, err = unmarshalUint64Safe(b); err == nil { - err = f.Truncate(int64(size)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_PERMISSIONS) != 0 { - var mode uint32 - if mode, b, err = unmarshalUint32Safe(b); err == nil { - err = f.Chmod(os.FileMode(mode)) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_ACMODTIME) != 0 { - var atime uint32 - var mtime uint32 - if atime, b, err = unmarshalUint32Safe(b); err != nil { - } else if mtime, b, err = unmarshalUint32Safe(b); err != nil { - } else { - atimeT := time.Unix(int64(atime), 0) - mtimeT := time.Unix(int64(mtime), 0) - err = os.Chtimes(f.Name(), atimeT, mtimeT) - } - } - if (p.Flags & ssh_FILEXFER_ATTR_UIDGID) != 0 { - var uid uint32 - var gid uint32 - if uid, b, err = unmarshalUint32Safe(b); err != nil { - } else if gid, b, err = unmarshalUint32Safe(b); err != nil { - } else { - err = f.Chown(int(uid), int(gid)) - } - } - - return svr.sendError(p, err) -} - -// 
translateErrno translates a syscall error number to a SFTP error code. -func translateErrno(errno syscall.Errno) uint32 { - switch errno { - case 0: - return ssh_FX_OK - case syscall.ENOENT: - return ssh_FX_NO_SUCH_FILE - case syscall.EPERM: - return ssh_FX_PERMISSION_DENIED - } - - return ssh_FX_FAILURE -} - -func statusFromError(p id, err error) sshFxpStatusPacket { - ret := sshFxpStatusPacket{ - ID: p.id(), - StatusError: StatusError{ - // ssh_FX_OK = 0 - // ssh_FX_EOF = 1 - // ssh_FX_NO_SUCH_FILE = 2 ENOENT - // ssh_FX_PERMISSION_DENIED = 3 - // ssh_FX_FAILURE = 4 - // ssh_FX_BAD_MESSAGE = 5 - // ssh_FX_NO_CONNECTION = 6 - // ssh_FX_CONNECTION_LOST = 7 - // ssh_FX_OP_UNSUPPORTED = 8 - Code: ssh_FX_OK, - }, - } - if err != nil { - debug("statusFromError: error is %T %#v", err, err) - ret.StatusError.Code = ssh_FX_FAILURE - ret.StatusError.msg = err.Error() - if err == io.EOF { - ret.StatusError.Code = ssh_FX_EOF - } else if errno, ok := err.(syscall.Errno); ok { - ret.StatusError.Code = translateErrno(errno) - } else if pathError, ok := err.(*os.PathError); ok { - debug("statusFromError: error is %T %#v", pathError.Err, pathError.Err) - if errno, ok := pathError.Err.(syscall.Errno); ok { - ret.StatusError.Code = translateErrno(errno) - } - } - } - return ret -} - -func clamp(v, max uint32) uint32 { - if v > max { - return max - } - return v -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go b/vendor/github.com/pkg/sftp/server_statvfs_darwin.go deleted file mode 100644 index 8c01dac52..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_darwin.go +++ /dev/null @@ -1,21 +0,0 @@ -package sftp - -import ( - "syscall" -) - -func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { - return &StatVFS{ - Bsize: uint64(stat.Bsize), - Frsize: uint64(stat.Bsize), // fragment size is a linux thing; use block size here - Blocks: stat.Blocks, - Bfree: stat.Bfree, - Bavail: stat.Bavail, - Files: stat.Files, - Ffree: stat.Ffree, - Favail: stat.Ffree, // not sure how to calculate Favail - Fsid: uint64(uint64(stat.Fsid.Val[1])<<32 | uint64(stat.Fsid.Val[0])), // endianness? - Flag: uint64(stat.Flags), // assuming POSIX? 
- Namemax: 1024, // man 2 statfs shows: #define MAXPATHLEN 1024 - }, nil -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_impl.go b/vendor/github.com/pkg/sftp/server_statvfs_impl.go deleted file mode 100644 index c26870ebe..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_impl.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build darwin linux,!gccgo - -// fill in statvfs structure with OS specific values -// Statfs_t is different per-kernel, and only exists on some unixes (not Solaris for instance) - -package sftp - -import ( - "syscall" -) - -func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { - stat := &syscall.Statfs_t{} - if err := syscall.Statfs(p.Path, stat); err != nil { - return svr.sendPacket(statusFromError(p, err)) - } - - retPkt, err := statvfsFromStatfst(stat) - if err != nil { - return svr.sendPacket(statusFromError(p, err)) - } - retPkt.ID = p.ID - - return svr.sendPacket(retPkt) -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_linux.go b/vendor/github.com/pkg/sftp/server_statvfs_linux.go deleted file mode 100644 index 43478e890..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_linux.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !gccgo,linux - -package sftp - -import ( - "syscall" -) - -func statvfsFromStatfst(stat *syscall.Statfs_t) (*StatVFS, error) { - return &StatVFS{ - Bsize: uint64(stat.Bsize), - Frsize: uint64(stat.Frsize), - Blocks: stat.Blocks, - Bfree: stat.Bfree, - Bavail: stat.Bavail, - Files: stat.Files, - Ffree: stat.Ffree, - Favail: stat.Ffree, // not sure how to calculate Favail - Flag: uint64(stat.Flags), // assuming POSIX? - Namemax: uint64(stat.Namelen), - }, nil -} diff --git a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go b/vendor/github.com/pkg/sftp/server_statvfs_stubs.go deleted file mode 100644 index 1512a132b..000000000 --- a/vendor/github.com/pkg/sftp/server_statvfs_stubs.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !darwin,!linux gccgo - -package sftp - -import ( - "syscall" -) - -func (p sshFxpExtendedPacketStatVFS) respond(svr *Server) error { - return syscall.ENOTSUP -} diff --git a/vendor/github.com/pkg/sftp/server_stubs.go b/vendor/github.com/pkg/sftp/server_stubs.go deleted file mode 100644 index 3b1ddbdbb..000000000 --- a/vendor/github.com/pkg/sftp/server_stubs.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !cgo,!plan9 windows android - -package sftp - -import ( - "os" - "path" -) - -func runLs(dirname string, dirent os.FileInfo) string { - return path.Join(dirname, dirent.Name()) -} diff --git a/vendor/github.com/pkg/sftp/server_unix.go b/vendor/github.com/pkg/sftp/server_unix.go deleted file mode 100644 index 8c3f0b44e..000000000 --- a/vendor/github.com/pkg/sftp/server_unix.go +++ /dev/null @@ -1,143 +0,0 @@ -// +build darwin dragonfly freebsd !android,linux netbsd openbsd solaris -// +build cgo - -package sftp - -import ( - "fmt" - "os" - "path" - "syscall" - "time" -) - -func runLsTypeWord(dirent os.FileInfo) string { - // find first character, the type char - // b Block special file. - // c Character special file. - // d Directory. - // l Symbolic link. - // s Socket link. - // p FIFO. - // - Regular file. 
- tc := '-' - mode := dirent.Mode() - if (mode & os.ModeDir) != 0 { - tc = 'd' - } else if (mode & os.ModeDevice) != 0 { - tc = 'b' - if (mode & os.ModeCharDevice) != 0 { - tc = 'c' - } - } else if (mode & os.ModeSymlink) != 0 { - tc = 'l' - } else if (mode & os.ModeSocket) != 0 { - tc = 's' - } else if (mode & os.ModeNamedPipe) != 0 { - tc = 'p' - } - - // owner - orc := '-' - if (mode & 0400) != 0 { - orc = 'r' - } - owc := '-' - if (mode & 0200) != 0 { - owc = 'w' - } - oxc := '-' - ox := (mode & 0100) != 0 - setuid := (mode & os.ModeSetuid) != 0 - if ox && setuid { - oxc = 's' - } else if setuid { - oxc = 'S' - } else if ox { - oxc = 'x' - } - - // group - grc := '-' - if (mode & 040) != 0 { - grc = 'r' - } - gwc := '-' - if (mode & 020) != 0 { - gwc = 'w' - } - gxc := '-' - gx := (mode & 010) != 0 - setgid := (mode & os.ModeSetgid) != 0 - if gx && setgid { - gxc = 's' - } else if setgid { - gxc = 'S' - } else if gx { - gxc = 'x' - } - - // all / others - arc := '-' - if (mode & 04) != 0 { - arc = 'r' - } - awc := '-' - if (mode & 02) != 0 { - awc = 'w' - } - axc := '-' - ax := (mode & 01) != 0 - sticky := (mode & os.ModeSticky) != 0 - if ax && sticky { - axc = 't' - } else if sticky { - axc = 'T' - } else if ax { - axc = 'x' - } - - return fmt.Sprintf("%c%c%c%c%c%c%c%c%c%c", tc, orc, owc, oxc, grc, gwc, gxc, arc, awc, axc) -} - -func runLsStatt(dirname string, dirent os.FileInfo, statt *syscall.Stat_t) string { - // example from openssh sftp server: - // crw-rw-rw- 1 root wheel 0 Jul 31 20:52 ttyvd - // format: - // {directory / char device / etc}{rwxrwxrwx} {number of links} owner group size month day [time (this year) | year (otherwise)] name - - typeword := runLsTypeWord(dirent) - numLinks := statt.Nlink - uid := statt.Uid - gid := statt.Gid - username := fmt.Sprintf("%d", uid) - groupname := fmt.Sprintf("%d", gid) - // TODO FIXME: uid -> username, gid -> groupname lookup for ls -l format output - - mtime := dirent.ModTime() - monthStr := mtime.Month().String()[0:3] - day := mtime.Day() - year := mtime.Year() - now := time.Now() - isOld := mtime.Before(now.Add(-time.Hour * 24 * 365 / 2)) - - yearOrTime := fmt.Sprintf("%02d:%02d", mtime.Hour(), mtime.Minute()) - if isOld { - yearOrTime = fmt.Sprintf("%d", year) - } - - return fmt.Sprintf("%s %4d %-8s %-8s %8d %s %2d %5s %s", typeword, numLinks, username, groupname, dirent.Size(), monthStr, day, yearOrTime, dirent.Name()) -} - -// ls -l style output for a file, which is in the 'long output' section of a readdir response packet -// this is a very simple (lazy) implementation, just enough to look almost like openssh in a few basic cases -func runLs(dirname string, dirent os.FileInfo) string { - dsys := dirent.Sys() - if dsys == nil { - } else if statt, ok := dsys.(*syscall.Stat_t); !ok { - } else { - return runLsStatt(dirname, dirent, statt) - } - - return path.Join(dirname, dirent.Name()) -} diff --git a/vendor/github.com/pkg/sftp/sftp.go b/vendor/github.com/pkg/sftp/sftp.go deleted file mode 100644 index 22184afe0..000000000 --- a/vendor/github.com/pkg/sftp/sftp.go +++ /dev/null @@ -1,217 +0,0 @@ -// Package sftp implements the SSH File Transfer Protocol as described in -// https://filezilla-project.org/specs/draft-ietf-secsh-filexfer-02.txt -package sftp - -import ( - "fmt" - - "github.com/pkg/errors" -) - -const ( - ssh_FXP_INIT = 1 - ssh_FXP_VERSION = 2 - ssh_FXP_OPEN = 3 - ssh_FXP_CLOSE = 4 - ssh_FXP_READ = 5 - ssh_FXP_WRITE = 6 - ssh_FXP_LSTAT = 7 - ssh_FXP_FSTAT = 8 - ssh_FXP_SETSTAT = 9 - ssh_FXP_FSETSTAT = 
10 - ssh_FXP_OPENDIR = 11 - ssh_FXP_READDIR = 12 - ssh_FXP_REMOVE = 13 - ssh_FXP_MKDIR = 14 - ssh_FXP_RMDIR = 15 - ssh_FXP_REALPATH = 16 - ssh_FXP_STAT = 17 - ssh_FXP_RENAME = 18 - ssh_FXP_READLINK = 19 - ssh_FXP_SYMLINK = 20 - ssh_FXP_STATUS = 101 - ssh_FXP_HANDLE = 102 - ssh_FXP_DATA = 103 - ssh_FXP_NAME = 104 - ssh_FXP_ATTRS = 105 - ssh_FXP_EXTENDED = 200 - ssh_FXP_EXTENDED_REPLY = 201 -) - -const ( - ssh_FX_OK = 0 - ssh_FX_EOF = 1 - ssh_FX_NO_SUCH_FILE = 2 - ssh_FX_PERMISSION_DENIED = 3 - ssh_FX_FAILURE = 4 - ssh_FX_BAD_MESSAGE = 5 - ssh_FX_NO_CONNECTION = 6 - ssh_FX_CONNECTION_LOST = 7 - ssh_FX_OP_UNSUPPORTED = 8 - - // see draft-ietf-secsh-filexfer-13 - // https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13#section-9.1 - ssh_FX_INVALID_HANDLE = 9 - ssh_FX_NO_SUCH_PATH = 10 - ssh_FX_FILE_ALREADY_EXISTS = 11 - ssh_FX_WRITE_PROTECT = 12 - ssh_FX_NO_MEDIA = 13 - ssh_FX_NO_SPACE_ON_FILESYSTEM = 14 - ssh_FX_QUOTA_EXCEEDED = 15 - ssh_FX_UNKNOWN_PRINCIPAL = 16 - ssh_FX_LOCK_CONFLICT = 17 - ssh_FX_DIR_NOT_EMPTY = 18 - ssh_FX_NOT_A_DIRECTORY = 19 - ssh_FX_INVALID_FILENAME = 20 - ssh_FX_LINK_LOOP = 21 - ssh_FX_CANNOT_DELETE = 22 - ssh_FX_INVALID_PARAMETER = 23 - ssh_FX_FILE_IS_A_DIRECTORY = 24 - ssh_FX_BYTE_RANGE_LOCK_CONFLICT = 25 - ssh_FX_BYTE_RANGE_LOCK_REFUSED = 26 - ssh_FX_DELETE_PENDING = 27 - ssh_FX_FILE_CORRUPT = 28 - ssh_FX_OWNER_INVALID = 29 - ssh_FX_GROUP_INVALID = 30 - ssh_FX_NO_MATCHING_BYTE_RANGE_LOCK = 31 -) - -const ( - ssh_FXF_READ = 0x00000001 - ssh_FXF_WRITE = 0x00000002 - ssh_FXF_APPEND = 0x00000004 - ssh_FXF_CREAT = 0x00000008 - ssh_FXF_TRUNC = 0x00000010 - ssh_FXF_EXCL = 0x00000020 -) - -type fxp uint8 - -func (f fxp) String() string { - switch f { - case ssh_FXP_INIT: - return "SSH_FXP_INIT" - case ssh_FXP_VERSION: - return "SSH_FXP_VERSION" - case ssh_FXP_OPEN: - return "SSH_FXP_OPEN" - case ssh_FXP_CLOSE: - return "SSH_FXP_CLOSE" - case ssh_FXP_READ: - return "SSH_FXP_READ" - case ssh_FXP_WRITE: - return "SSH_FXP_WRITE" - case ssh_FXP_LSTAT: - return "SSH_FXP_LSTAT" - case ssh_FXP_FSTAT: - return "SSH_FXP_FSTAT" - case ssh_FXP_SETSTAT: - return "SSH_FXP_SETSTAT" - case ssh_FXP_FSETSTAT: - return "SSH_FXP_FSETSTAT" - case ssh_FXP_OPENDIR: - return "SSH_FXP_OPENDIR" - case ssh_FXP_READDIR: - return "SSH_FXP_READDIR" - case ssh_FXP_REMOVE: - return "SSH_FXP_REMOVE" - case ssh_FXP_MKDIR: - return "SSH_FXP_MKDIR" - case ssh_FXP_RMDIR: - return "SSH_FXP_RMDIR" - case ssh_FXP_REALPATH: - return "SSH_FXP_REALPATH" - case ssh_FXP_STAT: - return "SSH_FXP_STAT" - case ssh_FXP_RENAME: - return "SSH_FXP_RENAME" - case ssh_FXP_READLINK: - return "SSH_FXP_READLINK" - case ssh_FXP_SYMLINK: - return "SSH_FXP_SYMLINK" - case ssh_FXP_STATUS: - return "SSH_FXP_STATUS" - case ssh_FXP_HANDLE: - return "SSH_FXP_HANDLE" - case ssh_FXP_DATA: - return "SSH_FXP_DATA" - case ssh_FXP_NAME: - return "SSH_FXP_NAME" - case ssh_FXP_ATTRS: - return "SSH_FXP_ATTRS" - case ssh_FXP_EXTENDED: - return "SSH_FXP_EXTENDED" - case ssh_FXP_EXTENDED_REPLY: - return "SSH_FXP_EXTENDED_REPLY" - default: - return "unknown" - } -} - -type fx uint8 - -func (f fx) String() string { - switch f { - case ssh_FX_OK: - return "SSH_FX_OK" - case ssh_FX_EOF: - return "SSH_FX_EOF" - case ssh_FX_NO_SUCH_FILE: - return "SSH_FX_NO_SUCH_FILE" - case ssh_FX_PERMISSION_DENIED: - return "SSH_FX_PERMISSION_DENIED" - case ssh_FX_FAILURE: - return "SSH_FX_FAILURE" - case ssh_FX_BAD_MESSAGE: - return "SSH_FX_BAD_MESSAGE" - case ssh_FX_NO_CONNECTION: - return "SSH_FX_NO_CONNECTION" - case ssh_FX_CONNECTION_LOST: - return 
"SSH_FX_CONNECTION_LOST" - case ssh_FX_OP_UNSUPPORTED: - return "SSH_FX_OP_UNSUPPORTED" - default: - return "unknown" - } -} - -type unexpectedPacketErr struct { - want, got uint8 -} - -func (u *unexpectedPacketErr) Error() string { - return fmt.Sprintf("sftp: unexpected packet: want %v, got %v", fxp(u.want), fxp(u.got)) -} - -func unimplementedPacketErr(u uint8) error { - return errors.Errorf("sftp: unimplemented packet type: got %v", fxp(u)) -} - -type unexpectedIDErr struct{ want, got uint32 } - -func (u *unexpectedIDErr) Error() string { - return fmt.Sprintf("sftp: unexpected id: want %v, got %v", u.want, u.got) -} - -func unimplementedSeekWhence(whence int) error { - return errors.Errorf("sftp: unimplemented seek whence %v", whence) -} - -func unexpectedCount(want, got uint32) error { - return errors.Errorf("sftp: unexpected count: want %v, got %v", want, got) -} - -type unexpectedVersionErr struct{ want, got uint32 } - -func (u *unexpectedVersionErr) Error() string { - return fmt.Sprintf("sftp: unexpected server version: want %v, got %v", u.want, u.got) -} - -// A StatusError is returned when an SFTP operation fails, and provides -// additional information about the failure. -type StatusError struct { - Code uint32 - msg, lang string -} - -func (s *StatusError) Error() string { return fmt.Sprintf("sftp: %q (%v)", s.msg, fx(s.Code)) } diff --git a/vendor/github.com/spf13/afero/README.md b/vendor/github.com/spf13/afero/README.md index db2081718..d9e332730 100644 --- a/vendor/github.com/spf13/afero/README.md +++ b/vendor/github.com/spf13/afero/README.md @@ -198,17 +198,17 @@ independent, with no test relying on the state left by an earlier test. Then in my tests I would initialize a new MemMapFs for each test: ```go func TestExist(t *testing.T) { - appFS = afero.NewMemMapFs() + appFS := afero.NewMemMapFs() // create test files and directories - appFS.MkdirAll("src/a", 0755)) + appFS.MkdirAll("src/a", 0755) afero.WriteFile(appFS, "src/a/b", []byte("file b"), 0644) afero.WriteFile(appFS, "src/c", []byte("file c"), 0644) - _, err := appFS.Stat("src/c") + name := "src/c" + _, err := appFS.Stat(name) if os.IsNotExist(err) { - t.Errorf("file \"%s\" does not exist.\n", name) + t.Errorf("file \"%s\" does not exist.\n", name) } } - ``` # Available Backends diff --git a/vendor/github.com/spf13/afero/afero.go b/vendor/github.com/spf13/afero/afero.go index e3df3f43c..f5b5e127c 100644 --- a/vendor/github.com/spf13/afero/afero.go +++ b/vendor/github.com/spf13/afero/afero.go @@ -77,7 +77,7 @@ type Fs interface { // happens. Remove(name string) error - // RemoveAll removes a directory path and all any children it contains. It + // RemoveAll removes a directory path and any children it contains. It // does not fail if the path does not exist (return nil). RemoveAll(path string) error diff --git a/vendor/github.com/spf13/afero/sftp.go b/vendor/github.com/spf13/afero/sftp.go deleted file mode 100644 index a095a54c6..000000000 --- a/vendor/github.com/spf13/afero/sftp.go +++ /dev/null @@ -1,128 +0,0 @@ -// Copyright © 2015 Jerry Jacobs . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package afero - -import ( - "os" - "time" - - "github.com/spf13/afero/sftp" - - "github.com/pkg/sftp" -) - -// SftpFs is a Fs implementation that uses functions provided by the sftp package. -// -// For details in any method, check the documentation of the sftp package -// (github.com/pkg/sftp). -type SftpFs struct{ - SftpClient *sftp.Client -} - -func (s SftpFs) Name() string { return "SftpFs" } - -func (s SftpFs) Create(name string) (File, error) { - f, err := sftpfs.FileCreate(s.SftpClient, name) - return f, err -} - -func (s SftpFs) Mkdir(name string, perm os.FileMode) error { - err := s.SftpClient.Mkdir(name) - if err != nil { - return err - } - return s.SftpClient.Chmod(name, perm) -} - -func (s SftpFs) MkdirAll(path string, perm os.FileMode) error { - // Fast path: if we can tell whether path is a directory or file, stop with success or error. - dir, err := s.Stat(path) - if err == nil { - if dir.IsDir() { - return nil - } - return err - } - - // Slow path: make sure parent exists and then call Mkdir for path. - i := len(path) - for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator. - i-- - } - - j := i - for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element. - j-- - } - - if j > 1 { - // Create parent - err = s.MkdirAll(path[0:j-1], perm) - if err != nil { - return err - } - } - - // Parent now exists; invoke Mkdir and use its result. - err = s.Mkdir(path, perm) - if err != nil { - // Handle arguments like "foo/." by - // double-checking that directory doesn't exist. - dir, err1 := s.Lstat(path) - if err1 == nil && dir.IsDir() { - return nil - } - return err - } - return nil -} - -func (s SftpFs) Open(name string) (File, error) { - f, err := sftpfs.FileOpen(s.SftpClient, name) - return f, err -} - -func (s SftpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) { - return nil,nil -} - -func (s SftpFs) Remove(name string) error { - return s.SftpClient.Remove(name) -} - -func (s SftpFs) RemoveAll(path string) error { - // TODO have a look at os.RemoveAll - // https://github.com/golang/go/blob/master/src/os/path.go#L66 - return nil -} - -func (s SftpFs) Rename(oldname, newname string) error { - return s.SftpClient.Rename(oldname, newname) -} - -func (s SftpFs) Stat(name string) (os.FileInfo, error) { - return s.SftpClient.Stat(name) -} - -func (s SftpFs) Lstat(p string) (os.FileInfo, error) { - return s.SftpClient.Lstat(p) -} - -func (s SftpFs) Chmod(name string, mode os.FileMode) error { - return s.SftpClient.Chmod(name, mode) -} - -func (s SftpFs) Chtimes(name string, atime time.Time, mtime time.Time) error { - return s.SftpClient.Chtimes(name, atime, mtime) -} diff --git a/vendor/github.com/spf13/afero/sftp/file.go b/vendor/github.com/spf13/afero/sftp/file.go deleted file mode 100644 index bab4abc89..000000000 --- a/vendor/github.com/spf13/afero/sftp/file.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright © 2015 Jerry Jacobs . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package sftpfs - -import ( - "os" - "github.com/pkg/sftp" -) - -type File struct { - fd *sftp.File -} - -func FileOpen(s *sftp.Client, name string) (*File, error) { - fd, err := s.Open(name) - if err != nil { - return &File{}, err - } - return &File{fd: fd}, nil -} - -func FileCreate(s *sftp.Client, name string) (*File, error) { - fd, err := s.Create(name) - if err != nil { - return &File{}, err - } - return &File{fd: fd}, nil -} - -func (f *File) Close() error { - return f.fd.Close() -} - -func (f *File) Name() string { - return f.fd.Name() -} - -func (f *File) Stat() (os.FileInfo, error) { - return f.fd.Stat() -} - -func (f *File) Sync() error { - return nil -} - -func (f *File) Truncate(size int64) error { - return f.fd.Truncate(size) -} - -func (f *File) Read(b []byte) (n int, err error) { - return f.fd.Read(b) -} - -// TODO -func (f *File) ReadAt(b []byte, off int64) (n int, err error) { - return 0,nil -} - -// TODO -func (f *File) Readdir(count int) (res []os.FileInfo, err error) { - return nil,nil -} - -// TODO -func (f *File) Readdirnames(n int) (names []string, err error) { - return nil,nil -} - -func (f *File) Seek(offset int64, whence int) (int64, error) { - return f.fd.Seek(offset, whence) -} - -func (f *File) Write(b []byte) (n int, err error) { - return f.fd.Write(b) -} - -// TODO -func (f *File) WriteAt(b []byte, off int64) (n int, err error) { - return 0,nil -} - -func (f *File) WriteString(s string) (ret int, err error) { - return f.fd.Write([]byte(s)) -} diff --git a/vendor/github.com/spf13/afero/sftp_test_go b/vendor/github.com/spf13/afero/sftp_test_go deleted file mode 100644 index bb00535d8..000000000 --- a/vendor/github.com/spf13/afero/sftp_test_go +++ /dev/null @@ -1,286 +0,0 @@ -// Copyright © 2015 Jerry Jacobs . -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package afero - -import ( - "testing" - "os" - "log" - "fmt" - "net" - "flag" - "time" - "io/ioutil" - "crypto/rsa" - _rand "crypto/rand" - "encoding/pem" - "crypto/x509" - - "golang.org/x/crypto/ssh" - "github.com/pkg/sftp" -) - -type SftpFsContext struct { - sshc *ssh.Client - sshcfg *ssh.ClientConfig - sftpc *sftp.Client -} - -// TODO we only connect with hardcoded user+pass for now -// it should be possible to use $HOME/.ssh/id_rsa to login into the stub sftp server -func SftpConnect(user, password, host string) (*SftpFsContext, error) { -/* - pemBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa") - if err != nil { - return nil,err - } - - signer, err := ssh.ParsePrivateKey(pemBytes) - if err != nil { - return nil,err - } - - sshcfg := &ssh.ClientConfig{ - User: user, - Auth: []ssh.AuthMethod{ - ssh.Password(password), - ssh.PublicKeys(signer), - }, - } -*/ - - sshcfg := &ssh.ClientConfig{ - User: user, - Auth: []ssh.AuthMethod{ - ssh.Password(password), - }, - } - - sshc, err := ssh.Dial("tcp", host, sshcfg) - if err != nil { - return nil,err - } - - sftpc, err := sftp.NewClient(sshc) - if err != nil { - return nil,err - } - - ctx := &SftpFsContext{ - sshc: sshc, - sshcfg: sshcfg, - sftpc: sftpc, - } - - return ctx,nil -} - -func (ctx *SftpFsContext) Disconnect() error { - ctx.sftpc.Close() - ctx.sshc.Close() - return nil -} - -// TODO for such a weird reason rootpath is "." when writing "file1" with afero sftp backend -func RunSftpServer(rootpath string) { - var ( - readOnly bool - debugLevelStr string - debugLevel int - debugStderr bool - rootDir string - ) - - flag.BoolVar(&readOnly, "R", false, "read-only server") - flag.BoolVar(&debugStderr, "e", true, "debug to stderr") - flag.StringVar(&debugLevelStr, "l", "none", "debug level") - flag.StringVar(&rootDir, "root", rootpath, "root directory") - flag.Parse() - - debugStream := ioutil.Discard - if debugStderr { - debugStream = os.Stderr - debugLevel = 1 - } - - // An SSH server is represented by a ServerConfig, which holds - // certificate details and handles authentication of ServerConns. - config := &ssh.ServerConfig{ - PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) { - // Should use constant-time compare (or better, salt+hash) in - // a production setting. - fmt.Fprintf(debugStream, "Login: %s\n", c.User()) - if c.User() == "test" && string(pass) == "test" { - return nil, nil - } - return nil, fmt.Errorf("password rejected for %q", c.User()) - }, - } - - privateBytes, err := ioutil.ReadFile("./test/id_rsa") - if err != nil { - log.Fatal("Failed to load private key", err) - } - - private, err := ssh.ParsePrivateKey(privateBytes) - if err != nil { - log.Fatal("Failed to parse private key", err) - } - - config.AddHostKey(private) - - // Once a ServerConfig has been configured, connections can be - // accepted. - listener, err := net.Listen("tcp", "0.0.0.0:2022") - if err != nil { - log.Fatal("failed to listen for connection", err) - } - fmt.Printf("Listening on %v\n", listener.Addr()) - - nConn, err := listener.Accept() - if err != nil { - log.Fatal("failed to accept incoming connection", err) - } - - // Before use, a handshake must be performed on the incoming - // net.Conn. - _, chans, reqs, err := ssh.NewServerConn(nConn, config) - if err != nil { - log.Fatal("failed to handshake", err) - } - fmt.Fprintf(debugStream, "SSH server established\n") - - // The incoming Request channel must be serviced. - go ssh.DiscardRequests(reqs) - - // Service the incoming Channel channel. 
- for newChannel := range chans { - // Channels have a type, depending on the application level - // protocol intended. In the case of an SFTP session, this is "subsystem" - // with a payload string of "sftp" - fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType()) - if newChannel.ChannelType() != "session" { - newChannel.Reject(ssh.UnknownChannelType, "unknown channel type") - fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType()) - continue - } - channel, requests, err := newChannel.Accept() - if err != nil { - log.Fatal("could not accept channel.", err) - } - fmt.Fprintf(debugStream, "Channel accepted\n") - - // Sessions have out-of-band requests such as "shell", - // "pty-req" and "env". Here we handle only the - // "subsystem" request. - go func(in <-chan *ssh.Request) { - for req := range in { - fmt.Fprintf(debugStream, "Request: %v\n", req.Type) - ok := false - switch req.Type { - case "subsystem": - fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:]) - if string(req.Payload[4:]) == "sftp" { - ok = true - } - } - fmt.Fprintf(debugStream, " - accepted: %v\n", ok) - req.Reply(ok, nil) - } - }(requests) - - server, err := sftp.NewServer(channel, channel, debugStream, debugLevel, readOnly, rootpath) - if err != nil { - log.Fatal(err) - } - if err := server.Serve(); err != nil { - log.Fatal("sftp server completed with error:", err) - } - } -} - -// MakeSSHKeyPair make a pair of public and private keys for SSH access. -// Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file. -// Private Key generated is PEM encoded -func MakeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) error { - privateKey, err := rsa.GenerateKey(_rand.Reader, bits) - if err != nil { - return err - } - - // generate and write private key as PEM - privateKeyFile, err := os.Create(privateKeyPath) - defer privateKeyFile.Close() - if err != nil { - return err - } - - privateKeyPEM := &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(privateKey)} - if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil { - return err - } - - // generate and write public key - pub, err := ssh.NewPublicKey(&privateKey.PublicKey) - if err != nil { - return err - } - - return ioutil.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0655) -} - -func TestSftpCreate(t *testing.T) { - os.Mkdir("./test", 0777) - MakeSSHKeyPair(1024, "./test/id_rsa.pub", "./test/id_rsa") - - go RunSftpServer("./test/") - time.Sleep(5 * time.Second) - - ctx, err := SftpConnect("test", "test", "localhost:2022") - if err != nil { - t.Fatal(err) - } - defer ctx.Disconnect() - - var AppFs Fs = SftpFs{ - SftpClient: ctx.sftpc, - } - - AppFs.MkdirAll("test/dir1/dir2/dir3", os.FileMode(0777)) - AppFs.Mkdir("test/foo", os.FileMode(0000)) - AppFs.Chmod("test/foo", os.FileMode(0700)) - AppFs.Mkdir("test/bar", os.FileMode(0777)) - - file, err := AppFs.Create("file1") - if err != nil { - t.Error(err) - } - defer file.Close() - - file.Write([]byte("hello\t")) - file.WriteString("world!\n") - - f1, err := AppFs.Open("file1") - if err != nil { - log.Fatalf("open: %v", err) - } - defer f1.Close() - - b := make([]byte, 100) - - _, err = f1.Read(b) - fmt.Println(string(b)) - - // TODO check here if "hello\tworld\n" is in buffer b -} diff --git a/vendor/github.com/spf13/pflag/.travis.yml b/vendor/github.com/spf13/pflag/.travis.yml index 0a7c13622..707bdc39a 100644 --- a/vendor/github.com/spf13/pflag/.travis.yml +++ 
b/vendor/github.com/spf13/pflag/.travis.yml @@ -3,9 +3,8 @@ sudo: false language: go go: - - 1.5.4 - 1.6.3 - - 1.7 + - 1.7.3 - tip matrix: diff --git a/vendor/golang.org/x/crypto/.gitattributes b/vendor/golang.org/x/crypto/.gitattributes deleted file mode 100644 index d2f212e5d..000000000 --- a/vendor/golang.org/x/crypto/.gitattributes +++ /dev/null @@ -1,10 +0,0 @@ -# Treat all files in this repo as binary, with no git magic updating -# line endings. Windows users contributing to Go will need to use a -# modern version of git and editors capable of LF line endings. -# -# We'll prevent accidental CRLF line endings from entering the repo -# via the git-review gofmt checks. -# -# See golang.org/issue/9281 - -* -text diff --git a/vendor/golang.org/x/crypto/.gitignore b/vendor/golang.org/x/crypto/.gitignore deleted file mode 100644 index 8339fd61d..000000000 --- a/vendor/golang.org/x/crypto/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# Add no patterns to .hgignore except for files generated by the build. -last-change diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS deleted file mode 100644 index 15167cd74..000000000 --- a/vendor/golang.org/x/crypto/AUTHORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code refers to The Go Authors for copyright purposes. -# The master list of authors is in the main Go distribution, -# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTING.md b/vendor/golang.org/x/crypto/CONTRIBUTING.md deleted file mode 100644 index 88dff59bc..000000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing to Go - -Go is an open source project. - -It is the work of hundreds of contributors. We appreciate your help! - - -## Filing issues - -When [filing an issue](https://golang.org/issue/new), make sure to answer these five questions: - -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? - -General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. -The gophers there will answer or ask you to file an issue if you've tripped over a bug. - -## Contributing code - -Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) -before sending patches. - -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - -Unless otherwise noted, the Go source files are distributed under -the BSD-style license found in the LICENSE file. - diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS deleted file mode 100644 index 1c4577e96..000000000 --- a/vendor/golang.org/x/crypto/CONTRIBUTORS +++ /dev/null @@ -1,3 +0,0 @@ -# This source code was written by the Go contributors. -# The master list of contributors is in the main Go distribution, -# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS deleted file mode 100644 index 733099041..000000000 --- a/vendor/golang.org/x/crypto/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. 
- -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/README b/vendor/golang.org/x/crypto/README deleted file mode 100644 index f1e0cbf94..000000000 --- a/vendor/golang.org/x/crypto/README +++ /dev/null @@ -1,3 +0,0 @@ -This repository holds supplementary Go cryptography libraries. - -To submit changes to this repository, see http://golang.org/doc/contribute.html. diff --git a/vendor/golang.org/x/crypto/codereview.cfg b/vendor/golang.org/x/crypto/codereview.cfg deleted file mode 100644 index 3f8b14b64..000000000 --- a/vendor/golang.org/x/crypto/codereview.cfg +++ /dev/null @@ -1 +0,0 @@ -issuerepo: golang/go diff --git a/vendor/golang.org/x/crypto/curve25519/const_amd64.s b/vendor/golang.org/x/crypto/curve25519/const_amd64.s deleted file mode 100644 index 797f9b051..000000000 --- a/vendor/golang.org/x/crypto/curve25519/const_amd64.s +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -DATA ·REDMASK51(SB)/8, $0x0007FFFFFFFFFFFF -GLOBL ·REDMASK51(SB), 8, $8 - -DATA ·_121666_213(SB)/8, $996687872 -GLOBL ·_121666_213(SB), 8, $8 - -DATA ·_2P0(SB)/8, $0xFFFFFFFFFFFDA -GLOBL ·_2P0(SB), 8, $8 - -DATA ·_2P1234(SB)/8, $0xFFFFFFFFFFFFE -GLOBL ·_2P1234(SB), 8, $8 diff --git a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s b/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s deleted file mode 100644 index 45484d1b5..000000000 --- a/vendor/golang.org/x/crypto/curve25519/cswap_amd64.s +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// This code was translated into a form compatible with 6a from the public -// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html - -// +build amd64,!gccgo,!appengine - -// func cswap(inout *[5]uint64, v uint64) -TEXT ·cswap(SB),7,$0 - MOVQ inout+0(FP),DI - MOVQ v+8(FP),SI - - CMPQ SI,$1 - MOVQ 0(DI),SI - MOVQ 80(DI),DX - MOVQ 8(DI),CX - MOVQ 88(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,0(DI) - MOVQ DX,80(DI) - MOVQ CX,8(DI) - MOVQ R8,88(DI) - MOVQ 16(DI),SI - MOVQ 96(DI),DX - MOVQ 24(DI),CX - MOVQ 104(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,16(DI) - MOVQ DX,96(DI) - MOVQ CX,24(DI) - MOVQ R8,104(DI) - MOVQ 32(DI),SI - MOVQ 112(DI),DX - MOVQ 40(DI),CX - MOVQ 120(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,32(DI) - MOVQ DX,112(DI) - MOVQ CX,40(DI) - MOVQ R8,120(DI) - MOVQ 48(DI),SI - MOVQ 128(DI),DX - MOVQ 56(DI),CX - MOVQ 136(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,48(DI) - MOVQ DX,128(DI) - MOVQ CX,56(DI) - MOVQ R8,136(DI) - MOVQ 64(DI),SI - MOVQ 144(DI),DX - MOVQ 72(DI),CX - MOVQ 152(DI),R8 - MOVQ SI,R9 - CMOVQEQ DX,SI - CMOVQEQ R9,DX - MOVQ CX,R9 - CMOVQEQ R8,CX - CMOVQEQ R9,R8 - MOVQ SI,64(DI) - MOVQ DX,144(DI) - MOVQ CX,72(DI) - MOVQ R8,152(DI) - MOVQ DI,AX - MOVQ SI,DX - RET diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go deleted file mode 100644 index 6918c47fc..000000000 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ /dev/null @@ -1,841 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// We have a implementation in amd64 assembly so this code is only run on -// non-amd64 platforms. The amd64 assembly does not support gccgo. -// +build !amd64 gccgo appengine - -package curve25519 - -// This code is a port of the public domain, "ref10" implementation of -// curve25519 from SUPERCOP 20130419 by D. J. Bernstein. - -// fieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type fieldElement [10]int32 - -func feZero(fe *fieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func feOne(fe *fieldElement) { - feZero(fe) - fe[0] = 1 -} - -func feAdd(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func feSub(dst, a, b *fieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func feCopy(dst, src *fieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// feCSwap replaces (f,g) with (g,f) if b == 1; replaces (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func feCSwap(f, g *fieldElement, b int32) { - var x fieldElement - b = -b - for i := range x { - x[i] = b & (f[i] ^ g[i]) - } - - for i := range f { - f[i] ^= x[i] - } - for i := range g { - g[i] ^= x[i] - } -} - -// load3 reads a 24-bit, little-endian value from in. -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -// load4 reads a 32-bit, little-endian value from in. 
-func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func feFromBytes(dst *fieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := load3(src[29:]) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// feToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20. - - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. 
- - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -// feMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
-func feMul(h, f, g *fieldElement) {
   [… the 32-bit precomputations (g1_19 … g9_19, f1_2 … f9_2), the 100 schoolbook partial
      products f{i}g{j}, the ten sums h0 … h9, the twelve-step carry chain with its bound
      comments, and the final int32 limb assignments elided …]
-}
-
-// feSquare calculates h = f*f. Can overlap h with f.
-//
-// Preconditions:
-//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func feSquare(h, f *fieldElement) {
   [… the squaring analogue of feMul: doubled cross terms, *19/*38 precomputations, the same
      carry chain, and the int32 limb assignments elided …]
-}
-
-// feMul121666 calculates h = f * 121666. Can overlap h with f.
-//
-// Preconditions:
-//    |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc.
-//
-// Postconditions:
-//    |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc.
-func feMul121666(h, f *fieldElement) {
-	h0 := int64(f[0]) * 121666
-	h1 := int64(f[1]) * 121666
-	h2 := int64(f[2]) * 121666
-	h3 := int64(f[3]) * 121666
-	h4 := int64(f[4]) * 121666
-	h5 := int64(f[5]) * 121666
-	h6 := int64(f[6]) * 121666
-	h7 := int64(f[7]) * 121666
-	h8 := int64(f[8]) * 121666
-	h9 := int64(f[9]) * 121666
-	var carry [10]int64
-
   [… one carry pass (the overflow out of h9 is folded back into h0 with a factor of 19)
      and the int32 limb assignments elided …]
-}
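Every carry chain in the field routines above folds whatever overflows bit 255 back into the low limb with a factor of 19, which is just the identity 2^255 ≡ 19 (mod 2^255 − 19). A minimal math/big sketch of that folding, for reference only (not part of this patch; the operand values are arbitrary):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func main() {
    	// p = 2^255 - 19, the curve25519 field prime.
    	one := big.NewInt(1)
    	p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

    	// Two arbitrary field elements (any values below p would do).
    	a := new(big.Int).Sub(new(big.Int).Lsh(one, 254), big.NewInt(12345))
    	b := new(big.Int).Sub(new(big.Int).Lsh(one, 253), big.NewInt(67890))

    	// Full product, then split at bit 255: a*b = hi*2^255 + lo.
    	ab := new(big.Int).Mul(a, b)
    	hi := new(big.Int).Rsh(ab, 255)
    	lo := new(big.Int).And(ab, new(big.Int).Sub(new(big.Int).Lsh(one, 255), one))

    	// Since 2^255 ≡ 19 (mod p), folding the high half back in times 19
    	// leaves the residue unchanged.
    	folded := new(big.Int).Add(lo, new(big.Int).Mul(hi, big.NewInt(19)))

    	same := new(big.Int).Mod(folded, p).Cmp(new(big.Int).Mod(ab, p)) == 0
    	fmt.Println(same) // true
    }

The vendored code performs the same reduction limb by limb so that every intermediate fits in 64 bits.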
-
-// feInvert sets out = z^-1.
-func feInvert(out, z *fieldElement) {
-	var t0, t1, t2, t3 fieldElement
-	var i int
-
   [… fixed chain of feSquare/feMul calls raising z to the power 2^255-21 elided …]
-
-	feMul(out, &t1, &t0)
-}
-
-func scalarMult(out, in, base *[32]byte) {
-	var e [32]byte
-
-	copy(e[:], in[:])
-	e[0] &= 248
-	e[31] &= 127
-	e[31] |= 64
-
-	var x1, x2, z2, x3, z3, tmp0, tmp1 fieldElement
-	feFromBytes(&x1, base)
-	feOne(&x2)
-	feCopy(&x3, &x1)
-	feOne(&z3)
-
-	swap := int32(0)
-	for pos := 254; pos >= 0; pos-- {
-		b := e[pos/8] >> uint(pos&7)
-		b &= 1
-		swap ^= int32(b)
-		feCSwap(&x2, &x3, swap)
-		feCSwap(&z2, &z3, swap)
-		swap = int32(b)
-
   [… one Montgomery ladder step built from feAdd/feSub/feMul/feSquare/feMul121666 elided …]
-	}
-
-	feCSwap(&x2, &x3, swap)
-	feCSwap(&z2, &z3, swap)
-
-	feInvert(&z2, &z2)
-	feMul(&x2, &x2, &z2)
-	feToBytes(out, &x2)
-}
diff --git a/vendor/golang.org/x/crypto/curve25519/doc.go b/vendor/golang.org/x/crypto/curve25519/doc.go
deleted file mode 100644
index ebeea3c2d..000000000
--- a/vendor/golang.org/x/crypto/curve25519/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package curve25519 provides an implementation of scalar multiplication on
-// the elliptic curve known as curve25519. See http://cr.yp.to/ecdh.html
-package curve25519 // import "golang.org/x/crypto/curve25519"
-
-// basePoint is the x coordinate of the generator of the curve.
-var basePoint = [32]byte{9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
-
-// ScalarMult sets dst to the product in*base where dst and base are the x
-// coordinates of group points and all values are in little-endian form.
-func ScalarMult(dst, in, base *[32]byte) {
-	scalarMult(dst, in, base)
-}
-
-// ScalarBaseMult sets dst to the product in*base where dst and base are the x
-// coordinates of group points, base is the standard generator and all values
-// are in little-endian form.
-func ScalarBaseMult(dst, in *[32]byte) {
-	ScalarMult(dst, in, &basePoint)
-}
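The deleted doc.go is the entire public surface of the vendored curve25519 package: ScalarBaseMult and ScalarMult over 32-byte little-endian x coordinates. A minimal Diffie-Hellman round trip against that API, for reference only (it assumes golang.org/x/crypto/curve25519 is still resolvable by the build after this vendoring change; error handling is omitted for brevity):

    package main

    import (
    	"crypto/rand"
    	"fmt"

    	"golang.org/x/crypto/curve25519"
    )

    func main() {
    	// Two private scalars; errors from rand.Read are ignored for brevity.
    	var aPriv, bPriv [32]byte
    	rand.Read(aPriv[:])
    	rand.Read(bPriv[:])

    	// Public keys are scalar multiples of the base point (x = 9).
    	var aPub, bPub [32]byte
    	curve25519.ScalarBaseMult(&aPub, &aPriv)
    	curve25519.ScalarBaseMult(&bPub, &bPriv)

    	// Each side multiplies its own scalar into the peer's public value;
    	// both land on the same shared x coordinate.
    	var shared1, shared2 [32]byte
    	curve25519.ScalarMult(&shared1, &aPriv, &bPub)
    	curve25519.ScalarMult(&shared2, &bPriv, &aPub)
    	fmt.Println(shared1 == shared2) // true
    }

The clamping of the scalar (e[0] &= 248, etc.) happens inside scalarMult, so raw random bytes are acceptable input here.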
diff --git a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s b/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
deleted file mode 100644
index 37599fac0..000000000
--- a/vendor/golang.org/x/crypto/curve25519/freeze_amd64.s
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// func freeze(inout *[5]uint64)
-TEXT ·freeze(SB),7,$96-8
   [… remaining amd64 assembly of the 94-line file elided …]
-	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s b/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
deleted file mode 100644
index 3949f9cfa..000000000
--- a/vendor/golang.org/x/crypto/curve25519/ladderstep_amd64.s
+++ /dev/null
@@ -1,1398 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// func ladderstep(inout *[5][5]uint64)
-TEXT ·ladderstep(SB),0,$384-8
   [… remaining amd64 assembly of the 1398-line file elided …]
-	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go b/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
deleted file mode 100644
index 5822bd533..000000000
--- a/vendor/golang.org/x/crypto/curve25519/mont25519_amd64.go
+++ /dev/null
@@ -1,240 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build amd64,!gccgo,!appengine
-
-package curve25519
-
-// These functions are implemented in the .s files. The names of the functions
-// in the rest of the file are also taken from the SUPERCOP sources to help
-// people following along.
-
-//go:noescape
-
-func cswap(inout *[5]uint64, v uint64)
-
-//go:noescape
-
-func ladderstep(inout *[5][5]uint64)
-
-//go:noescape
-
-func freeze(inout *[5]uint64)
-
-//go:noescape
-
-func mul(dest, a, b *[5]uint64)
-
-//go:noescape
-
-func square(out, in *[5]uint64)
-
-// mladder uses a Montgomery ladder to calculate (xr/zr) *= s.
-func mladder(xr, zr *[5]uint64, s *[32]byte) {
-	var work [5][5]uint64
-
-	work[0] = *xr
-	setint(&work[1], 1)
-	setint(&work[2], 0)
-	work[3] = *xr
-	setint(&work[4], 1)
-
-	j := uint(6)
-	var prevbit byte
-
-	for i := 31; i >= 0; i-- {
-		for j < 8 {
-			bit := ((*s)[i] >> j) & 1
-			swap := bit ^ prevbit
-			prevbit = bit
-			cswap(&work[1], uint64(swap))
-			ladderstep(&work)
-			j--
-		}
-		j = 7
-	}
-
-	*xr = work[1]
-	*zr = work[2]
-}
-
-func scalarMult(out, in, base *[32]byte) {
-	var e [32]byte
-	copy(e[:], (*in)[:])
-	e[0] &= 248
-	e[31] &= 127
-	e[31] |= 64
-
-	var t, z [5]uint64
-	unpack(&t, base)
-	mladder(&t, &z, &e)
-	invert(&z, &z)
-	mul(&t, &t, &z)
-	pack(out, &t)
-}
-
-func setint(r *[5]uint64, v uint64) {
-	r[0] = v
-	r[1] = 0
-	r[2] = 0
-	r[3] = 0
-	r[4] = 0
-}
-
-// unpack sets r = x where r consists of 5, 51-bit limbs in little-endian
-// order.
-func unpack(r *[5]uint64, x *[32]byte) {
   [… bit extraction of the five 51-bit limbs from the 32-byte input elided …]
-}
-
-// pack sets out = x where out is the usual, little-endian form of the 5,
-// 51-bit limbs in x.
-func pack(out *[32]byte, x *[5]uint64) {
-	t := *x
-	freeze(&t)
-
   [… byte-by-byte serialization of the five 51-bit limbs into out[0]…out[31] elided …]
-}
-
-// invert calculates r = x^-1 mod p using Fermat's little theorem.
-func invert(r *[5]uint64, x *[5]uint64) {
-	var z2, z9, z11, z2_5_0, z2_10_0, z2_20_0, z2_50_0, z2_100_0, t [5]uint64
-
   [… fixed square-and-multiply chain computing x^(2^255 - 21) elided …]
-
-	mul(r, &t, &z11) /* 2^255 - 21 */
-}
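invert above implements Fermat's little theorem with a fixed square-and-multiply chain, so the exponentiation x^(p−2) = x^(2^255−21) runs as a data-independent sequence of operations. The same value written directly with math/big, for comparison only (math/big is not constant time and the input value is arbitrary):

    package main

    import (
    	"fmt"
    	"math/big"
    )

    func main() {
    	// p = 2^255 - 19.
    	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

    	// An arbitrary nonzero element.
    	x := big.NewInt(123456789)

    	// Fermat: x^(p-2) ≡ x^-1 (mod p).
    	inv := new(big.Int).Exp(x, new(big.Int).Sub(p, big.NewInt(2)), p)

    	// x * x^-1 ≡ 1 (mod p).
    	check := new(big.Int).Mod(new(big.Int).Mul(x, inv), p)
    	fmt.Println(check.Cmp(big.NewInt(1)) == 0) // true
    }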
diff --git a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s b/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
deleted file mode 100644
index e48d183ee..000000000
--- a/vendor/golang.org/x/crypto/curve25519/mul_amd64.s
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// func mul(dest, a, b *[5]uint64)
-TEXT ·mul(SB),0,$128-24
   [… remaining amd64 assembly of the 191-line file elided …]
-	RET
diff --git a/vendor/golang.org/x/crypto/curve25519/square_amd64.s b/vendor/golang.org/x/crypto/curve25519/square_amd64.s
deleted file mode 100644
index 78d1a50dd..000000000
--- a/vendor/golang.org/x/crypto/curve25519/square_amd64.s
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This code was translated into a form compatible with 6a from the public
-// domain sources in SUPERCOP: http://bench.cr.yp.to/supercop.html
-
-// +build amd64,!gccgo,!appengine
-
-// func square(out, in *[5]uint64)
-TEXT ·square(SB),7,$96-16
   [… remaining amd64 assembly of the 153-line file elided …]
-	RET
diff --git a/vendor/golang.org/x/crypto/ed25519/ed25519.go b/vendor/golang.org/x/crypto/ed25519/ed25519.go
deleted file mode 100644
index f1d95674a..000000000
--- a/vendor/golang.org/x/crypto/ed25519/ed25519.go
+++ /dev/null
@@ -1,181 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ed25519 implements the Ed25519 signature algorithm. See
-// http://ed25519.cr.yp.to/.
-//
-// These functions are also compatible with the “Ed25519” function defined in
-// https://tools.ietf.org/html/draft-irtf-cfrg-eddsa-05.
-package ed25519
-
-// This code is a port of the public domain, “ref10” implementation of ed25519
-// from SUPERCOP.
-
-import (
-	"crypto"
-	cryptorand "crypto/rand"
-	"crypto/sha512"
-	"crypto/subtle"
-	"errors"
-	"io"
-	"strconv"
-
-	"golang.org/x/crypto/ed25519/internal/edwards25519"
-)
-
-const (
-	// PublicKeySize is the size, in bytes, of public keys as used in this package.
-	PublicKeySize = 32
-	// PrivateKeySize is the size, in bytes, of private keys as used in this package.
-	PrivateKeySize = 64
-	// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
-	SignatureSize = 64
-)
-
-// PublicKey is the type of Ed25519 public keys.
-type PublicKey []byte
-
-// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
-type PrivateKey []byte
-
-// Public returns the PublicKey corresponding to priv.
-func (priv PrivateKey) Public() crypto.PublicKey {
-	publicKey := make([]byte, PublicKeySize)
-	copy(publicKey, priv[32:])
-	return PublicKey(publicKey)
-}
-
-// Sign signs the given message with priv.
-// Ed25519 performs two passes over messages to be signed and therefore cannot
-// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
-// indicate the message hasn't been hashed. This can be achieved by passing
-// crypto.Hash(0) as the value for opts.
-func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
-	if opts.HashFunc() != crypto.Hash(0) {
-		return nil, errors.New("ed25519: cannot sign hashed message")
-	}
-
-	return Sign(priv, message), nil
-}
-
-// GenerateKey generates a public/private key pair using entropy from rand.
-// If rand is nil, crypto/rand.Reader will be used.
-func GenerateKey(rand io.Reader) (publicKey PublicKey, privateKey PrivateKey, err error) {
-	if rand == nil {
-		rand = cryptorand.Reader
-	}
-
-	privateKey = make([]byte, PrivateKeySize)
-	publicKey = make([]byte, PublicKeySize)
-	_, err = io.ReadFull(rand, privateKey[:32])
-	if err != nil {
-		return nil, nil, err
-	}
-
   [… SHA-512 expansion of the seed, clamping, and derivation of the public key via
      edwards25519.GeScalarMultBase elided …]
-
-	return publicKey, privateKey, nil
-}
-
-// Sign signs the message with privateKey and returns a signature. It will
-// panic if len(privateKey) is not PrivateKeySize.
-func Sign(privateKey PrivateKey, message []byte) []byte {
-	if l := len(privateKey); l != PrivateKeySize {
-		panic("ed25519: bad private key length: " + strconv.Itoa(l))
-	}
-
   [… computation of R, the challenge hash, and S via the edwards25519 scalar routines elided …]
-
-	return signature
-}
-
-// Verify reports whether sig is a valid signature of message by publicKey. It
-// will panic if len(publicKey) is not PublicKeySize.
-func Verify(publicKey PublicKey, message, sig []byte) bool {
-	if l := len(publicKey); l != PublicKeySize {
-		panic("ed25519: bad public key length: " + strconv.Itoa(l))
-	}
-
-	if len(sig) != SignatureSize || sig[63]&224 != 0 {
-		return false
-	}
-
   [… recomputation of the R point via GeDoubleScalarMultVartime and a constant-time
      comparison against the R encoded in the signature elided …]
-}
-var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -// base contains precomputed multiples of the base-point. See the Ed25519 paper -// for a discussion about how these values are used. 
-var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, 
-10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, -15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, 
-6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, -7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 
13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 
18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - { - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, 
-15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, -13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 
6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - 
FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 
13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, - FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 
14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 
3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, -16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - 
FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, 
-25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, 
-239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, 
-16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, -3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 
13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, -8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - 
FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, -16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, 
-8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, 
-6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, -2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - 
FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, -20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, 
-13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 
9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, 
- { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, -4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, 
-11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, -3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, 
-29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, -19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - 
FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, -1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go b/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go deleted file mode 100644 index 5f8b99478..000000000 --- a/vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go +++ /dev/null @@ -1,1771 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package edwards25519 - -// This code is a port of the public domain, “ref10” implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -var zero FieldElement - -func FeZero(fe *FieldElement) { - copy(fe[:], zero[:]) -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - dst[0] = a[0] + b[0] - dst[1] = a[1] + b[1] - dst[2] = a[2] + b[2] - dst[3] = a[3] + b[3] - dst[4] = a[4] + b[4] - dst[5] = a[5] + b[5] - dst[6] = a[6] + b[6] - dst[7] = a[7] + b[7] - dst[8] = a[8] + b[8] - dst[9] = a[9] + b[9] -} - -func FeSub(dst, a, b *FieldElement) { - dst[0] = a[0] - b[0] - dst[1] = a[1] - b[1] - dst[2] = a[2] - b[2] - dst[3] = a[3] - b[3] - dst[4] = a[4] - b[4] - dst[5] = a[5] - b[5] - dst[6] = a[6] - b[6] - dst[7] = a[7] - b[7] - dst[8] = a[8] - b[8] - dst[9] = a[9] - b[9] -} - -func FeCopy(dst, src *FieldElement) { - copy(dst[:], src[:]) -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - b = -b - f[0] ^= b & (f[0] ^ g[0]) - f[1] ^= b & (f[1] ^ g[1]) - f[2] ^= b & (f[2] ^ g[2]) - f[3] ^= b & (f[3] ^ g[3]) - f[4] ^= b & (f[4] ^ g[4]) - f[5] ^= b & (f[5] ^ g[5]) - f[6] ^= b & (f[6] ^ g[6]) - f[7] ^= b & (f[7] ^ g[7]) - f[8] ^= b & (f[8] ^ g[8]) - f[9] ^= b & (f[9] ^ g[9]) -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - FeCombine(dst, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25) h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25) h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
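Aside (illustration only, not part of the patch): the deleted comment above pins the ten limb weights at 2^0, 2^26, 2^51, 2^77, 2^102, 2^128, 2^153, 2^179, 2^204 and 2^230, and FeToBytes can fold the quotient back in as 19*q because 2^255 is congruent to 19 modulo p = 2^255 - 19. A minimal math/big sketch of that identity; limbShift, limbsToInt and the sample limb vector are made up for the check:

package main

import (
    "fmt"
    "math/big"
)

// limbShift lists the radix positions from the deleted FieldElement comment:
// t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9].
var limbShift = [10]uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}

// limbsToInt evaluates a ten-limb vector as a plain integer, with no reduction.
func limbsToInt(t [10]int64) *big.Int {
    sum := new(big.Int)
    for i, sh := range limbShift {
        sum.Add(sum, new(big.Int).Lsh(big.NewInt(t[i]), sh))
    }
    return sum
}

func main() {
    p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))

    // A unit carry out of the top limb is worth 2^255, i.e. 19 mod p, so this
    // vector evaluates to exactly p and reduces to zero.
    wrapped := limbsToInt([10]int64{-19, 0, 0, 0, 0, 0, 0, 0, 0, 1 << 25})
    fmt.Println(new(big.Int).Mod(wrapped, p)) // prints 0
}

That is the same fact the 19*q term in FeToBytes above and the c9*19 fold in FeCombine below depend on.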
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - h[0] = -f[0] - h[1] = -f[1] - h[2] = -f[2] - h[3] = -f[3] - h[4] = -f[4] - h[5] = -f[5] - h[6] = -f[6] - h[7] = -f[7] - h[8] = -f[8] - h[9] = -f[9] -} - -func FeCombine(h *FieldElement, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - var c0, c1, c2, c3, c4, c5, c6, c7, c8, c9 int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. 
|h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - c1 = (h1 + (1 << 24)) >> 25 - h2 += c1 - h1 -= c1 << 25 - c5 = (h5 + (1 << 24)) >> 25 - h6 += c5 - h5 -= c5 << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - c2 = (h2 + (1 << 25)) >> 26 - h3 += c2 - h2 -= c2 << 26 - c6 = (h6 + (1 << 25)) >> 26 - h7 += c6 - h6 -= c6 << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - c3 = (h3 + (1 << 24)) >> 25 - h4 += c3 - h3 -= c3 << 25 - c7 = (h7 + (1 << 24)) >> 25 - h8 += c7 - h7 -= c7 << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - c4 = (h4 + (1 << 25)) >> 26 - h5 += c4 - h4 -= c4 << 26 - c8 = (h8 + (1 << 25)) >> 26 - h9 += c8 - h8 -= c8 << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - c9 = (h9 + (1 << 24)) >> 25 - h0 += c9 * 19 - h9 -= c9 << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - c0 = (h0 + (1 << 25)) >> 26 - h1 += c0 - h0 -= c0 << 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs, can squeeze carries into int32. 
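Aside (illustration only, not part of the patch): each carry step in FeCombine above is rounded, c = (h + 2^25) >> 26 for the 26-bit limbs (and the 2^24/25-bit analogue for the odd limbs), so the limb that remains is centered around zero rather than sitting in [0, 2^26), which is what keeps every limb small enough to fit back into int32 as the inline bound comments note. A small self-contained check; carry26 and the sample values are made up:

package main

import "fmt"

// carry26 mirrors one rounded FeCombine carry out of a 26-bit limb: the value
// is preserved (low + c*2^26 == h) and the remaining low part stays within ±2^25.
func carry26(h int64) (low, c int64) {
    c = (h + (1 << 25)) >> 26
    low = h - c<<26
    return
}

func main() {
    for _, h := range []int64{0, 1 << 30, -(1 << 30), 123456789123, -987654321987} {
        low, c := carry26(h)
        fmt.Println(h == low+c<<26, low >= -(1<<25) && low < 1<<25) // expect: true true
    }
}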
-func FeMul(h, f, g *FieldElement) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - - f1_2 := int64(2 * f[1]) - f3_2 := int64(2 * f[3]) - f5_2 := int64(2 * f[5]) - f7_2 := int64(2 * f[7]) - f9_2 := int64(2 * f[9]) - - g0 := int64(g[0]) - g1 := int64(g[1]) - g2 := int64(g[2]) - g3 := int64(g[3]) - g4 := int64(g[4]) - g5 := int64(g[5]) - g6 := int64(g[6]) - g7 := int64(g[7]) - g8 := int64(g[8]) - g9 := int64(g[9]) - - g1_19 := int64(19 * g[1]) /* 1.4*2^29 */ - g2_19 := int64(19 * g[2]) /* 1.4*2^30; still ok */ - g3_19 := int64(19 * g[3]) - g4_19 := int64(19 * g[4]) - g5_19 := int64(19 * g[5]) - g6_19 := int64(19 * g[6]) - g7_19 := int64(19 * g[7]) - g8_19 := int64(19 * g[8]) - g9_19 := int64(19 * g[9]) - - h0 := f0*g0 + f1_2*g9_19 + f2*g8_19 + f3_2*g7_19 + f4*g6_19 + f5_2*g5_19 + f6*g4_19 + f7_2*g3_19 + f8*g2_19 + f9_2*g1_19 - h1 := f0*g1 + f1*g0 + f2*g9_19 + f3*g8_19 + f4*g7_19 + f5*g6_19 + f6*g5_19 + f7*g4_19 + f8*g3_19 + f9*g2_19 - h2 := f0*g2 + f1_2*g1 + f2*g0 + f3_2*g9_19 + f4*g8_19 + f5_2*g7_19 + f6*g6_19 + f7_2*g5_19 + f8*g4_19 + f9_2*g3_19 - h3 := f0*g3 + f1*g2 + f2*g1 + f3*g0 + f4*g9_19 + f5*g8_19 + f6*g7_19 + f7*g6_19 + f8*g5_19 + f9*g4_19 - h4 := f0*g4 + f1_2*g3 + f2*g2 + f3_2*g1 + f4*g0 + f5_2*g9_19 + f6*g8_19 + f7_2*g7_19 + f8*g6_19 + f9_2*g5_19 - h5 := f0*g5 + f1*g4 + f2*g3 + f3*g2 + f4*g1 + f5*g0 + f6*g9_19 + f7*g8_19 + f8*g7_19 + f9*g6_19 - h6 := f0*g6 + f1_2*g5 + f2*g4 + f3_2*g3 + f4*g2 + f5_2*g1 + f6*g0 + f7_2*g9_19 + f8*g8_19 + f9_2*g7_19 - h7 := f0*g7 + f1*g6 + f2*g5 + f3*g4 + f4*g3 + f5*g2 + f6*g1 + f7*g0 + f8*g9_19 + f9*g8_19 - h8 := f0*g8 + f1_2*g7 + f2*g6 + f3_2*g5 + f4*g4 + f5_2*g3 + f6*g2 + f7_2*g1 + f8*g0 + f9_2*g9_19 - h9 := f0*g9 + f1*g8 + f2*g7 + f3*g6 + f4*g5 + f5*g4 + f6*g3 + f7*g2 + f8*g1 + f9*g0 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func feSquare(f *FieldElement) (h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 int64) { - f0 := int64(f[0]) - f1 := int64(f[1]) - f2 := int64(f[2]) - f3 := int64(f[3]) - f4 := int64(f[4]) - f5 := int64(f[5]) - f6 := int64(f[6]) - f7 := int64(f[7]) - f8 := int64(f[8]) - f9 := int64(f[9]) - f0_2 := int64(2 * f[0]) - f1_2 := int64(2 * f[1]) - f2_2 := int64(2 * f[2]) - f3_2 := int64(2 * f[3]) - f4_2 := int64(2 * f[4]) - f5_2 := int64(2 * f[5]) - f6_2 := int64(2 * f[6]) - f7_2 := int64(2 * f[7]) - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - - h0 = f0*f0 + f1_2*f9_38 + f2_2*f8_19 + f3_2*f7_38 + f4_2*f6_19 + f5*f5_38 - h1 = f0_2*f1 + f2*f9_38 + f3_2*f8_19 + f4*f7_38 + f5_2*f6_19 - h2 = f0_2*f2 + f1_2*f1 + f3_2*f9_38 + f4_2*f8_19 + f5_2*f7_38 + f6*f6_19 - h3 = f0_2*f3 + f1_2*f2 + f4*f9_38 + f5_2*f8_19 + f6*f7_38 - h4 = f0_2*f4 + f1_2*f3_2 + f2*f2 + f5_2*f9_38 + f6_2*f8_19 + f7*f7_38 - h5 = f0_2*f5 + f1_2*f4 + f2_2*f3 + f6*f9_38 + f7_2*f8_19 - h6 = f0_2*f6 + f1_2*f5_2 + f2_2*f4 + f3_2*f3 + f7_2*f9_38 + f8*f8_19 - h7 = f0_2*f7 + f1_2*f6 + f2_2*f5 + f3_2*f4 + f8*f9_38 - h8 = f0_2*f8 + f1_2*f7_2 + f2_2*f6 + f3_2*f5_2 + f4*f4 + f9*f9_38 - h9 = f0_2*f9 + f1_2*f8 + f2_2*f7 + f3_2*f6 + f4_2*f5 - - return -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. 
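Aside (illustration only, not part of the patch): the g1_19 ... g9_19 precomputations in FeMul above are another use of 2^255 = 19 (mod p): any partial product that lands at or above bit 255 re-enters the low half multiplied by 19, so no separate reduction pass is needed. A math/big sanity check with arbitrary operands below p:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    one := big.NewInt(1)
    p := new(big.Int).Sub(new(big.Int).Lsh(one, 255), big.NewInt(19))

    // Arbitrary sample operands, both below p.
    a := new(big.Int).Add(new(big.Int).Lsh(one, 250), big.NewInt(12345))
    b := new(big.Int).Add(new(big.Int).Lsh(one, 251), big.NewInt(67890))

    // Split the double-width product at bit 255 and fold the high half back in
    // with a factor of 19.
    prod := new(big.Int).Mul(a, b)
    low := new(big.Int).And(prod, new(big.Int).Sub(new(big.Int).Lsh(one, 255), one))
    high := new(big.Int).Rsh(prod, 255)
    folded := new(big.Int).Add(low, high.Mul(high, big.NewInt(19)))

    fmt.Println(new(big.Int).Mod(prod, p).Cmp(new(big.Int).Mod(folded, p)) == 0) // prints true
}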
-func FeSquare(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - h0, h1, h2, h3, h4, h5, h6, h7, h8, h9 := feSquare(f) - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - FeCombine(h, h0, h1, h2, h3, h4, h5, h6, h7, h8, h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. 
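Aside (illustration only, not part of the patch): the long addition chain in FeInvert above evaluates z^(p-2) = z^(2^255-21), which by Fermat's little theorem is the inverse of z in GF(p); fePow22523 likewise computes z^(2^252-3) = z^((p-5)/8), the exponent the point-decompression code below uses to take square roots. A quick math/big cross-check of the inversion exponent; the sample value 9 is arbitrary:

package main

import (
    "fmt"
    "math/big"
)

func main() {
    p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 255), big.NewInt(19))
    z := big.NewInt(9) // arbitrary nonzero sample element

    byExp := new(big.Int).Exp(z, new(big.Int).Sub(p, big.NewInt(2)), p) // z^(p-2) mod p
    byInv := new(big.Int).ModInverse(z, p)

    fmt.Println(byExp.Cmp(byInv) == 0) // prints true
}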
-// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) != (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q 
*CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31]. -// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise, assuming that b and c are -// non-negative. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. 
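The fixed-base and double-scalar multiplication routines above are the signing and verification kernels behind Ed25519. A minimal usage sketch follows, assuming the public golang.org/x/crypto/ed25519 wrapper package (its GenerateKey/Sign/Verify functions are not part of this patch, only its internals are):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ed25519"
)

func main() {
	// Key generation uses the fixed-base multiplication (GeScalarMultBase);
	// Verify exercises the variable-time double-scalar multiplication.
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	msg := []byte("infrakit")
	sig := ed25519.Sign(priv, msg)
	fmt.Println("valid:", ed25519.Verify(pub, msg, sig))
}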
-func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
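ScMulAdd below packs its operands into 21-bit limbs, but the documented result is simply (a*b + c) mod l for the l given in the comment above. The helper here is an illustrative arbitrary-precision cross-check, not part of the vendored code:

package main

import (
	"fmt"
	"math/big"
)

// scMulAddRef computes (a*b + c) mod l with math/big, where
// l = 2^252 + 27742317777372353535851937790883648493, matching the
// documented output of ScMulAdd (which works on little-endian byte arrays).
func scMulAddRef(a, b, c *big.Int) *big.Int {
	l := new(big.Int).Lsh(big.NewInt(1), 252)
	tail, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
	l.Add(l, tail)

	out := new(big.Int).Mul(a, b)
	out.Add(out, c)
	return out.Mod(out, l)
}

func main() {
	fmt.Println(scMulAddRef(big.NewInt(3), big.NewInt(5), big.NewInt(7))) // 22
}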
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/vendor/golang.org/x/crypto/ssh/buffer.go b/vendor/golang.org/x/crypto/ssh/buffer.go deleted file mode 100644 index 6931b5114..000000000 --- a/vendor/golang.org/x/crypto/ssh/buffer.go +++ /dev/null @@ -1,98 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "io" - "sync" -) - -// buffer provides a linked list buffer for data exchange -// between producer and consumer. Theoretically the buffer is -// of unlimited capacity as it does no allocation of its own. -type buffer struct { - // protects concurrent access to head, tail and closed - *sync.Cond - - head *element // the buffer that will be read first - tail *element // the buffer that will be read last - - closed bool -} - -// An element represents a single link in a linked list. -type element struct { - buf []byte - next *element -} - -// newBuffer returns an empty buffer that is not closed. -func newBuffer() *buffer { - e := new(element) - b := &buffer{ - Cond: newCond(), - head: e, - tail: e, - } - return b -} - -// write makes buf available for Read to receive. -// buf must not be modified after the call to write. -func (b *buffer) write(buf []byte) { - b.Cond.L.Lock() - e := &element{buf: buf} - b.tail.next = e - b.tail = e - b.Cond.Signal() - b.Cond.L.Unlock() -} - -// eof closes the buffer. Reads from the buffer once all -// the data has been consumed will receive os.EOF. -func (b *buffer) eof() error { - b.Cond.L.Lock() - b.closed = true - b.Cond.Signal() - b.Cond.L.Unlock() - return nil -} - -// Read reads data from the internal buffer in buf. Reads will block -// if no data is available, or until the buffer is closed. -func (b *buffer) Read(buf []byte) (n int, err error) { - b.Cond.L.Lock() - defer b.Cond.L.Unlock() - - for len(buf) > 0 { - // if there is data in b.head, copy it - if len(b.head.buf) > 0 { - r := copy(buf, b.head.buf) - buf, b.head.buf = buf[r:], b.head.buf[r:] - n += r - continue - } - // if there is a next buffer, make it the head - if len(b.head.buf) == 0 && b.head != b.tail { - b.head = b.head.next - continue - } - - // if at least one byte has been copied, return - if n > 0 { - break - } - - // if nothing was read, and there is nothing outstanding - // check to see if the buffer is closed. - if b.closed { - err = io.EOF - break - } - // out of buffers, wait for producer - b.Cond.Wait() - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/certs.go b/vendor/golang.org/x/crypto/ssh/certs.go deleted file mode 100644 index 6331c94d5..000000000 --- a/vendor/golang.org/x/crypto/ssh/certs.go +++ /dev/null @@ -1,503 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" - "sort" - "time" -) - -// These constants from [PROTOCOL.certkeys] represent the algorithm names -// for certificate types supported by this package. 
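The algorithm identifiers in the const block below are what an OpenSSH certificate advertises in its first field; a certificate read from an authorized_keys-style line type-asserts to *ssh.Certificate. A minimal sketch, with the file path as a placeholder:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// id_ed25519-cert.pub is a placeholder for a file holding a single
	// "ssh-ed25519-cert-v01@openssh.com AAAA..." line.
	line, err := ioutil.ReadFile("id_ed25519-cert.pub")
	if err != nil {
		log.Fatal(err)
	}

	pub, _, _, _, err := ssh.ParseAuthorizedKey(line)
	if err != nil {
		log.Fatal(err)
	}

	cert, ok := pub.(*ssh.Certificate)
	if !ok {
		log.Fatalf("not a certificate: %T", pub)
	}
	fmt.Println(cert.Type(), cert.KeyId, cert.ValidPrincipals)
}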
-const ( - CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com" - CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com" - CertAlgoECDSA256v01 = "ecdsa-sha2-nistp256-cert-v01@openssh.com" - CertAlgoECDSA384v01 = "ecdsa-sha2-nistp384-cert-v01@openssh.com" - CertAlgoECDSA521v01 = "ecdsa-sha2-nistp521-cert-v01@openssh.com" - CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com" -) - -// Certificate types distinguish between host and user -// certificates. The values can be set in the CertType field of -// Certificate. -const ( - UserCert = 1 - HostCert = 2 -) - -// Signature represents a cryptographic signature. -type Signature struct { - Format string - Blob []byte -} - -// CertTimeInfinity can be used for OpenSSHCertV01.ValidBefore to indicate that -// a certificate does not expire. -const CertTimeInfinity = 1<<64 - 1 - -// An Certificate represents an OpenSSH certificate as defined in -// [PROTOCOL.certkeys]?rev=1.8. -type Certificate struct { - Nonce []byte - Key PublicKey - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []string - ValidAfter uint64 - ValidBefore uint64 - Permissions - Reserved []byte - SignatureKey PublicKey - Signature *Signature -} - -// genericCertData holds the key-independent part of the certificate data. -// Overall, certificates contain an nonce, public key fields and -// key-independent fields. -type genericCertData struct { - Serial uint64 - CertType uint32 - KeyId string - ValidPrincipals []byte - ValidAfter uint64 - ValidBefore uint64 - CriticalOptions []byte - Extensions []byte - Reserved []byte - SignatureKey []byte - Signature []byte -} - -func marshalStringList(namelist []string) []byte { - var to []byte - for _, name := range namelist { - s := struct{ N string }{name} - to = append(to, Marshal(&s)...) - } - return to -} - -type optionsTuple struct { - Key string - Value []byte -} - -type optionsTupleValue struct { - Value string -} - -// serialize a map of critical options or extensions -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty string value -func marshalTuples(tups map[string]string) []byte { - keys := make([]string, 0, len(tups)) - for key := range tups { - keys = append(keys, key) - } - sort.Strings(keys) - - var ret []byte - for _, key := range keys { - s := optionsTuple{Key: key} - if value := tups[key]; len(value) > 0 { - s.Value = Marshal(&optionsTupleValue{value}) - } - ret = append(ret, Marshal(&s)...) - } - return ret -} - -// issue #10569 - per [PROTOCOL.certkeys] and SSH implementation, -// we need two length prefixes for a non-empty option value -func parseTuples(in []byte) (map[string]string, error) { - tups := map[string]string{} - var lastKey string - var haveLastKey bool - - for len(in) > 0 { - var key, val, extra []byte - var ok bool - - if key, in, ok = parseString(in); !ok { - return nil, errShortRead - } - keyStr := string(key) - // according to [PROTOCOL.certkeys], the names must be in - // lexical order. 
- if haveLastKey && keyStr <= lastKey { - return nil, fmt.Errorf("ssh: certificate options are not in lexical order") - } - lastKey, haveLastKey = keyStr, true - // the next field is a data field, which if non-empty has a string embedded - if val, in, ok = parseString(in); !ok { - return nil, errShortRead - } - if len(val) > 0 { - val, extra, ok = parseString(val) - if !ok { - return nil, errShortRead - } - if len(extra) > 0 { - return nil, fmt.Errorf("ssh: unexpected trailing data after certificate option value") - } - tups[keyStr] = string(val) - } else { - tups[keyStr] = "" - } - } - return tups, nil -} - -func parseCert(in []byte, privAlgo string) (*Certificate, error) { - nonce, rest, ok := parseString(in) - if !ok { - return nil, errShortRead - } - - key, rest, err := parsePubKey(rest, privAlgo) - if err != nil { - return nil, err - } - - var g genericCertData - if err := Unmarshal(rest, &g); err != nil { - return nil, err - } - - c := &Certificate{ - Nonce: nonce, - Key: key, - Serial: g.Serial, - CertType: g.CertType, - KeyId: g.KeyId, - ValidAfter: g.ValidAfter, - ValidBefore: g.ValidBefore, - } - - for principals := g.ValidPrincipals; len(principals) > 0; { - principal, rest, ok := parseString(principals) - if !ok { - return nil, errShortRead - } - c.ValidPrincipals = append(c.ValidPrincipals, string(principal)) - principals = rest - } - - c.CriticalOptions, err = parseTuples(g.CriticalOptions) - if err != nil { - return nil, err - } - c.Extensions, err = parseTuples(g.Extensions) - if err != nil { - return nil, err - } - c.Reserved = g.Reserved - k, err := ParsePublicKey(g.SignatureKey) - if err != nil { - return nil, err - } - - c.SignatureKey = k - c.Signature, rest, ok = parseSignatureBody(g.Signature) - if !ok || len(rest) > 0 { - return nil, errors.New("ssh: signature parse error") - } - - return c, nil -} - -type openSSHCertSigner struct { - pub *Certificate - signer Signer -} - -// NewCertSigner returns a Signer that signs with the given Certificate, whose -// private key is held by signer. It returns an error if the public key in cert -// doesn't match the key used by signer. -func NewCertSigner(cert *Certificate, signer Signer) (Signer, error) { - if bytes.Compare(cert.Key.Marshal(), signer.PublicKey().Marshal()) != 0 { - return nil, errors.New("ssh: signer and cert have different public key") - } - - return &openSSHCertSigner{cert, signer}, nil -} - -func (s *openSSHCertSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - return s.signer.Sign(rand, data) -} - -func (s *openSSHCertSigner) PublicKey() PublicKey { - return s.pub -} - -const sourceAddressCriticalOption = "source-address" - -// CertChecker does the work of verifying a certificate. Its methods -// can be plugged into ClientConfig.HostKeyCallback and -// ServerConfig.PublicKeyCallback. For the CertChecker to work, -// minimally, the IsAuthority callback should be set. -type CertChecker struct { - // SupportedCriticalOptions lists the CriticalOptions that the - // server application layer understands. These are only used - // for user certificates. - SupportedCriticalOptions []string - - // IsAuthority should return true if the key is recognized as - // an authority. This allows for certificates to be signed by other - // certificates. - IsAuthority func(auth PublicKey) bool - - // Clock is used for verifying time stamps. If nil, time.Now - // is used. - Clock func() time.Time - - // UserKeyFallback is called when CertChecker.Authenticate encounters a - // public key that is not a certificate. 
It must implement validation - // of user keys or else, if nil, all such keys are rejected. - UserKeyFallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // HostKeyFallback is called when CertChecker.CheckHostKey encounters a - // public key that is not a certificate. It must implement host key - // validation or else, if nil, all such keys are rejected. - HostKeyFallback func(addr string, remote net.Addr, key PublicKey) error - - // IsRevoked is called for each certificate so that revocation checking - // can be implemented. It should return true if the given certificate - // is revoked and false otherwise. If nil, no certificates are - // considered to have been revoked. - IsRevoked func(cert *Certificate) bool -} - -// CheckHostKey checks a host key certificate. This method can be -// plugged into ClientConfig.HostKeyCallback. -func (c *CertChecker) CheckHostKey(addr string, remote net.Addr, key PublicKey) error { - cert, ok := key.(*Certificate) - if !ok { - if c.HostKeyFallback != nil { - return c.HostKeyFallback(addr, remote, key) - } - return errors.New("ssh: non-certificate host key") - } - if cert.CertType != HostCert { - return fmt.Errorf("ssh: certificate presented as a host key has type %d", cert.CertType) - } - - return c.CheckCert(addr, cert) -} - -// Authenticate checks a user certificate. Authenticate can be used as -// a value for ServerConfig.PublicKeyCallback. -func (c *CertChecker) Authenticate(conn ConnMetadata, pubKey PublicKey) (*Permissions, error) { - cert, ok := pubKey.(*Certificate) - if !ok { - if c.UserKeyFallback != nil { - return c.UserKeyFallback(conn, pubKey) - } - return nil, errors.New("ssh: normal key pairs not accepted") - } - - if cert.CertType != UserCert { - return nil, fmt.Errorf("ssh: cert has type %d", cert.CertType) - } - - if err := c.CheckCert(conn.User(), cert); err != nil { - return nil, err - } - - return &cert.Permissions, nil -} - -// CheckCert checks CriticalOptions, ValidPrincipals, revocation, timestamp and -// the signature of the certificate. -func (c *CertChecker) CheckCert(principal string, cert *Certificate) error { - if c.IsRevoked != nil && c.IsRevoked(cert) { - return fmt.Errorf("ssh: certicate serial %d revoked", cert.Serial) - } - - for opt, _ := range cert.CriticalOptions { - // sourceAddressCriticalOption will be enforced by - // serverAuthenticate - if opt == sourceAddressCriticalOption { - continue - } - - found := false - for _, supp := range c.SupportedCriticalOptions { - if supp == opt { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: unsupported critical option %q in certificate", opt) - } - } - - if len(cert.ValidPrincipals) > 0 { - // By default, certs are valid for all users/hosts. 
- found := false - for _, p := range cert.ValidPrincipals { - if p == principal { - found = true - break - } - } - if !found { - return fmt.Errorf("ssh: principal %q not in the set of valid principals for given certificate: %q", principal, cert.ValidPrincipals) - } - } - - if !c.IsAuthority(cert.SignatureKey) { - return fmt.Errorf("ssh: certificate signed by unrecognized authority") - } - - clock := c.Clock - if clock == nil { - clock = time.Now - } - - unixNow := clock().Unix() - if after := int64(cert.ValidAfter); after < 0 || unixNow < int64(cert.ValidAfter) { - return fmt.Errorf("ssh: cert is not yet valid") - } - if before := int64(cert.ValidBefore); cert.ValidBefore != uint64(CertTimeInfinity) && (unixNow >= before || before < 0) { - return fmt.Errorf("ssh: cert has expired") - } - if err := cert.SignatureKey.Verify(cert.bytesForSigning(), cert.Signature); err != nil { - return fmt.Errorf("ssh: certificate signature does not verify") - } - - return nil -} - -// SignCert sets c.SignatureKey to the authority's public key and stores a -// Signature, by authority, in the certificate. -func (c *Certificate) SignCert(rand io.Reader, authority Signer) error { - c.Nonce = make([]byte, 32) - if _, err := io.ReadFull(rand, c.Nonce); err != nil { - return err - } - c.SignatureKey = authority.PublicKey() - - sig, err := authority.Sign(rand, c.bytesForSigning()) - if err != nil { - return err - } - c.Signature = sig - return nil -} - -var certAlgoNames = map[string]string{ - KeyAlgoRSA: CertAlgoRSAv01, - KeyAlgoDSA: CertAlgoDSAv01, - KeyAlgoECDSA256: CertAlgoECDSA256v01, - KeyAlgoECDSA384: CertAlgoECDSA384v01, - KeyAlgoECDSA521: CertAlgoECDSA521v01, - KeyAlgoED25519: CertAlgoED25519v01, -} - -// certToPrivAlgo returns the underlying algorithm for a certificate algorithm. -// Panics if a non-certificate algorithm is passed. -func certToPrivAlgo(algo string) string { - for privAlgo, pubAlgo := range certAlgoNames { - if pubAlgo == algo { - return privAlgo - } - } - panic("unknown cert algorithm") -} - -func (cert *Certificate) bytesForSigning() []byte { - c2 := *cert - c2.Signature = nil - out := c2.Marshal() - // Drop trailing signature length. - return out[:len(out)-4] -} - -// Marshal serializes c into OpenSSH's wire format. It is part of the -// PublicKey interface. -func (c *Certificate) Marshal() []byte { - generic := genericCertData{ - Serial: c.Serial, - CertType: c.CertType, - KeyId: c.KeyId, - ValidPrincipals: marshalStringList(c.ValidPrincipals), - ValidAfter: uint64(c.ValidAfter), - ValidBefore: uint64(c.ValidBefore), - CriticalOptions: marshalTuples(c.CriticalOptions), - Extensions: marshalTuples(c.Extensions), - Reserved: c.Reserved, - SignatureKey: c.SignatureKey.Marshal(), - } - if c.Signature != nil { - generic.Signature = Marshal(c.Signature) - } - genericBytes := Marshal(&generic) - keyBytes := c.Key.Marshal() - _, keyBytes, _ = parseString(keyBytes) - prefix := Marshal(&struct { - Name string - Nonce []byte - Key []byte `ssh:"rest"` - }{c.Type(), c.Nonce, keyBytes}) - - result := make([]byte, 0, len(prefix)+len(genericBytes)) - result = append(result, prefix...) - result = append(result, genericBytes...) - return result -} - -// Type returns the key name. It is part of the PublicKey interface. -func (c *Certificate) Type() string { - algo, ok := certAlgoNames[c.Key.Type()] - if !ok { - panic("unknown cert key type " + c.Key.Type()) - } - return algo -} - -// Verify verifies a signature against the certificate's public -// key. It is part of the PublicKey interface. 
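CheckCert above ties together revocation, critical options, principals, the validity window, and the CA signature; the usual way to consume it is through CertChecker as a host-key callback. A sketch under the assumption that caKey is the trusted CA public key and that the user name and address are placeholders:

package example

import (
	"net"

	"golang.org/x/crypto/ssh"
)

// dialWithCertCheck accepts a host key only if it is a certificate signed by caKey.
func dialWithCertCheck(caKey ssh.PublicKey, auth ssh.AuthMethod) (*ssh.Client, error) {
	checker := &ssh.CertChecker{
		// Per the comment above, IsAuthority is the minimum that must be set.
		IsAuthority: func(signer ssh.PublicKey) bool {
			return string(signer.Marshal()) == string(caKey.Marshal())
		},
	}
	config := &ssh.ClientConfig{
		User: "ubuntu",
		Auth: []ssh.AuthMethod{auth},
		// CheckHostKey rejects plain (non-certificate) host keys and
		// delegates certificate validation to CheckCert.
		HostKeyCallback: func(addr string, remote net.Addr, key ssh.PublicKey) error {
			return checker.CheckHostKey(addr, remote, key)
		},
	}
	return ssh.Dial("tcp", "manager.example.com:22", config)
}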
-func (c *Certificate) Verify(data []byte, sig *Signature) error { - return c.Key.Verify(data, sig) -} - -func parseSignatureBody(in []byte) (out *Signature, rest []byte, ok bool) { - format, in, ok := parseString(in) - if !ok { - return - } - - out = &Signature{ - Format: string(format), - } - - if out.Blob, in, ok = parseString(in); !ok { - return - } - - return out, in, ok -} - -func parseSignature(in []byte) (out *Signature, rest []byte, ok bool) { - sigBytes, rest, ok := parseString(in) - if !ok { - return - } - - out, trailing, ok := parseSignatureBody(sigBytes) - if !ok || len(trailing) > 0 { - return nil, nil, false - } - return -} diff --git a/vendor/golang.org/x/crypto/ssh/channel.go b/vendor/golang.org/x/crypto/ssh/channel.go deleted file mode 100644 index 6d709b50b..000000000 --- a/vendor/golang.org/x/crypto/ssh/channel.go +++ /dev/null @@ -1,633 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "log" - "sync" -) - -const ( - minPacketLength = 9 - // channelMaxPacket contains the maximum number of bytes that will be - // sent in a single packet. As per RFC 4253, section 6.1, 32k is also - // the minimum. - channelMaxPacket = 1 << 15 - // We follow OpenSSH here. - channelWindowSize = 64 * channelMaxPacket -) - -// NewChannel represents an incoming request to a channel. It must either be -// accepted for use by calling Accept, or rejected by calling Reject. -type NewChannel interface { - // Accept accepts the channel creation request. It returns the Channel - // and a Go channel containing SSH requests. The Go channel must be - // serviced otherwise the Channel will hang. - Accept() (Channel, <-chan *Request, error) - - // Reject rejects the channel creation request. After calling - // this, no other methods on the Channel may be called. - Reject(reason RejectionReason, message string) error - - // ChannelType returns the type of the channel, as supplied by the - // client. - ChannelType() string - - // ExtraData returns the arbitrary payload for this channel, as supplied - // by the client. This data is specific to the channel type. - ExtraData() []byte -} - -// A Channel is an ordered, reliable, flow-controlled, duplex stream -// that is multiplexed over an SSH connection. -type Channel interface { - // Read reads up to len(data) bytes from the channel. - Read(data []byte) (int, error) - - // Write writes len(data) bytes to the channel. - Write(data []byte) (int, error) - - // Close signals end of channel use. No data may be sent after this - // call. - Close() error - - // CloseWrite signals the end of sending in-band - // data. Requests may still be sent, and the other side may - // still send data - CloseWrite() error - - // SendRequest sends a channel request. If wantReply is true, - // it will wait for a reply and return the result as a - // boolean, otherwise the return value will be false. Channel - // requests are out-of-band messages so they may be sent even - // if the data stream is closed or blocked by flow control. - // If the channel is closed before a reply is returned, io.EOF - // is returned. - SendRequest(name string, wantReply bool, payload []byte) (bool, error) - - // Stderr returns an io.ReadWriter that writes to this channel - // with the extended data type set to stderr. 
Stderr may - // safely be read and written from a different goroutine than - // Read and Write respectively. - Stderr() io.ReadWriter -} - -// Request is a request sent outside of the normal stream of -// data. Requests can either be specific to an SSH channel, or they -// can be global. -type Request struct { - Type string - WantReply bool - Payload []byte - - ch *channel - mux *mux -} - -// Reply sends a response to a request. It must be called for all requests -// where WantReply is true and is a no-op otherwise. The payload argument is -// ignored for replies to channel-specific requests. -func (r *Request) Reply(ok bool, payload []byte) error { - if !r.WantReply { - return nil - } - - if r.ch == nil { - return r.mux.ackRequest(ok, payload) - } - - return r.ch.ackRequest(ok) -} - -// RejectionReason is an enumeration used when rejecting channel creation -// requests. See RFC 4254, section 5.1. -type RejectionReason uint32 - -const ( - Prohibited RejectionReason = iota + 1 - ConnectionFailed - UnknownChannelType - ResourceShortage -) - -// String converts the rejection reason to human readable form. -func (r RejectionReason) String() string { - switch r { - case Prohibited: - return "administratively prohibited" - case ConnectionFailed: - return "connect failed" - case UnknownChannelType: - return "unknown channel type" - case ResourceShortage: - return "resource shortage" - } - return fmt.Sprintf("unknown reason %d", int(r)) -} - -func min(a uint32, b int) uint32 { - if a < uint32(b) { - return a - } - return uint32(b) -} - -type channelDirection uint8 - -const ( - channelInbound channelDirection = iota - channelOutbound -) - -// channel is an implementation of the Channel interface that works -// with the mux class. -type channel struct { - // R/O after creation - chanType string - extraData []byte - localId, remoteId uint32 - - // maxIncomingPayload and maxRemotePayload are the maximum - // payload sizes of normal and extended data packets for - // receiving and sending, respectively. The wire packet will - // be 9 or 13 bytes larger (excluding encryption overhead). - maxIncomingPayload uint32 - maxRemotePayload uint32 - - mux *mux - - // decided is set to true if an accept or reject message has been sent - // (for outbound channels) or received (for inbound channels). - decided bool - - // direction contains either channelOutbound, for channels created - // locally, or channelInbound, for channels created by the peer. - direction channelDirection - - // Pending internal channel messages. - msg chan interface{} - - // Since requests have no ID, there can be only one request - // with WantReply=true outstanding. This lock is held by a - // goroutine that has such an outgoing request pending. - sentRequestMu sync.Mutex - - incomingRequests chan *Request - - sentEOF bool - - // thread-safe data - remoteWin window - pending *buffer - extPending *buffer - - // windowMu protects myWindow, the flow-control window. - windowMu sync.Mutex - myWindow uint32 - - // writeMu serializes calls to mux.conn.writePacket() and - // protects sentClose and packetPool. This mutex must be - // different from windowMu, as writePacket can block if there - // is a key exchange pending. - writeMu sync.Mutex - sentClose bool - - // packetPool has a buffer for each extended channel ID to - // save allocations during writes. - packetPool map[uint32][]byte -} - -// writePacket sends a packet. If the packet is a channel close, it updates -// sentClose. This method takes the lock c.writeMu. 
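On the server side, the NewChannel and Request values described above arrive on Go channels that must be drained. A minimal accept loop (the channel type check and replies are illustrative; chans would come from ssh.NewServerConn) looks roughly like this:

package example

import "golang.org/x/crypto/ssh"

// serviceChannels handles incoming channel-open requests, rejecting unknown
// channel types and acknowledging the per-channel request stream so the peer
// does not stall waiting for replies.
func serviceChannels(chans <-chan ssh.NewChannel) {
	for newCh := range chans {
		if newCh.ChannelType() != "session" {
			newCh.Reject(ssh.UnknownChannelType, "only sessions are supported")
			continue
		}
		ch, requests, err := newCh.Accept()
		if err != nil {
			continue
		}
		go func(in <-chan *ssh.Request) {
			for req := range in {
				// Reply is required whenever WantReply is set and is a
				// no-op otherwise, so replying unconditionally is safe.
				req.Reply(req.Type == "shell", nil)
			}
		}(requests)
		ch.Write([]byte("hello from the sketch\n"))
		ch.Close()
	}
}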
-func (c *channel) writePacket(packet []byte) error { - c.writeMu.Lock() - if c.sentClose { - c.writeMu.Unlock() - return io.EOF - } - c.sentClose = (packet[0] == msgChannelClose) - err := c.mux.conn.writePacket(packet) - c.writeMu.Unlock() - return err -} - -func (c *channel) sendMessage(msg interface{}) error { - if debugMux { - log.Printf("send(%d): %#v", c.mux.chanList.offset, msg) - } - - p := Marshal(msg) - binary.BigEndian.PutUint32(p[1:], c.remoteId) - return c.writePacket(p) -} - -// WriteExtended writes data to a specific extended stream. These streams are -// used, for example, for stderr. -func (c *channel) WriteExtended(data []byte, extendedCode uint32) (n int, err error) { - if c.sentEOF { - return 0, io.EOF - } - // 1 byte message type, 4 bytes remoteId, 4 bytes data length - opCode := byte(msgChannelData) - headerLength := uint32(9) - if extendedCode > 0 { - headerLength += 4 - opCode = msgChannelExtendedData - } - - c.writeMu.Lock() - packet := c.packetPool[extendedCode] - // We don't remove the buffer from packetPool, so - // WriteExtended calls from different goroutines will be - // flagged as errors by the race detector. - c.writeMu.Unlock() - - for len(data) > 0 { - space := min(c.maxRemotePayload, len(data)) - if space, err = c.remoteWin.reserve(space); err != nil { - return n, err - } - if want := headerLength + space; uint32(cap(packet)) < want { - packet = make([]byte, want) - } else { - packet = packet[:want] - } - - todo := data[:space] - - packet[0] = opCode - binary.BigEndian.PutUint32(packet[1:], c.remoteId) - if extendedCode > 0 { - binary.BigEndian.PutUint32(packet[5:], uint32(extendedCode)) - } - binary.BigEndian.PutUint32(packet[headerLength-4:], uint32(len(todo))) - copy(packet[headerLength:], todo) - if err = c.writePacket(packet); err != nil { - return n, err - } - - n += len(todo) - data = data[len(todo):] - } - - c.writeMu.Lock() - c.packetPool[extendedCode] = packet - c.writeMu.Unlock() - - return n, err -} - -func (c *channel) handleData(packet []byte) error { - headerLen := 9 - isExtendedData := packet[0] == msgChannelExtendedData - if isExtendedData { - headerLen = 13 - } - if len(packet) < headerLen { - // malformed data packet - return parseError(packet[0]) - } - - var extended uint32 - if isExtendedData { - extended = binary.BigEndian.Uint32(packet[5:]) - } - - length := binary.BigEndian.Uint32(packet[headerLen-4 : headerLen]) - if length == 0 { - return nil - } - if length > c.maxIncomingPayload { - // TODO(hanwen): should send Disconnect? - return errors.New("ssh: incoming packet exceeds maximum payload size") - } - - data := packet[headerLen:] - if length != uint32(len(data)) { - return errors.New("ssh: wrong packet length") - } - - c.windowMu.Lock() - if c.myWindow < length { - c.windowMu.Unlock() - // TODO(hanwen): should send Disconnect with reason? - return errors.New("ssh: remote side wrote too much") - } - c.myWindow -= length - c.windowMu.Unlock() - - if extended == 1 { - c.extPending.write(data) - } else if extended > 0 { - // discard other extended data. - } else { - c.pending.write(data) - } - return nil -} - -func (c *channel) adjustWindow(n uint32) error { - c.windowMu.Lock() - // Since myWindow is managed on our side, and can never exceed - // the initial window setting, we don't worry about overflow. 
- c.myWindow += uint32(n) - c.windowMu.Unlock() - return c.sendMessage(windowAdjustMsg{ - AdditionalBytes: uint32(n), - }) -} - -func (c *channel) ReadExtended(data []byte, extended uint32) (n int, err error) { - switch extended { - case 1: - n, err = c.extPending.Read(data) - case 0: - n, err = c.pending.Read(data) - default: - return 0, fmt.Errorf("ssh: extended code %d unimplemented", extended) - } - - if n > 0 { - err = c.adjustWindow(uint32(n)) - // sendWindowAdjust can return io.EOF if the remote - // peer has closed the connection, however we want to - // defer forwarding io.EOF to the caller of Read until - // the buffer has been drained. - if n > 0 && err == io.EOF { - err = nil - } - } - - return n, err -} - -func (c *channel) close() { - c.pending.eof() - c.extPending.eof() - close(c.msg) - close(c.incomingRequests) - c.writeMu.Lock() - // This is not necessary for a normal channel teardown, but if - // there was another error, it is. - c.sentClose = true - c.writeMu.Unlock() - // Unblock writers. - c.remoteWin.close() -} - -// responseMessageReceived is called when a success or failure message is -// received on a channel to check that such a message is reasonable for the -// given channel. -func (c *channel) responseMessageReceived() error { - if c.direction == channelInbound { - return errors.New("ssh: channel response message received on inbound channel") - } - if c.decided { - return errors.New("ssh: duplicate response received for channel") - } - c.decided = true - return nil -} - -func (c *channel) handlePacket(packet []byte) error { - switch packet[0] { - case msgChannelData, msgChannelExtendedData: - return c.handleData(packet) - case msgChannelClose: - c.sendMessage(channelCloseMsg{PeersId: c.remoteId}) - c.mux.chanList.remove(c.localId) - c.close() - return nil - case msgChannelEOF: - // RFC 4254 is mute on how EOF affects dataExt messages but - // it is logical to signal EOF at the same time. 
- c.extPending.eof() - c.pending.eof() - return nil - } - - decoded, err := decode(packet) - if err != nil { - return err - } - - switch msg := decoded.(type) { - case *channelOpenFailureMsg: - if err := c.responseMessageReceived(); err != nil { - return err - } - c.mux.chanList.remove(msg.PeersId) - c.msg <- msg - case *channelOpenConfirmMsg: - if err := c.responseMessageReceived(); err != nil { - return err - } - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - return fmt.Errorf("ssh: invalid MaxPacketSize %d from peer", msg.MaxPacketSize) - } - c.remoteId = msg.MyId - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.MyWindow) - c.msg <- msg - case *windowAdjustMsg: - if !c.remoteWin.add(msg.AdditionalBytes) { - return fmt.Errorf("ssh: invalid window update for %d bytes", msg.AdditionalBytes) - } - case *channelRequestMsg: - req := Request{ - Type: msg.Request, - WantReply: msg.WantReply, - Payload: msg.RequestSpecificData, - ch: c, - } - - c.incomingRequests <- &req - default: - c.msg <- msg - } - return nil -} - -func (m *mux) newChannel(chanType string, direction channelDirection, extraData []byte) *channel { - ch := &channel{ - remoteWin: window{Cond: newCond()}, - myWindow: channelWindowSize, - pending: newBuffer(), - extPending: newBuffer(), - direction: direction, - incomingRequests: make(chan *Request, 16), - msg: make(chan interface{}, 16), - chanType: chanType, - extraData: extraData, - mux: m, - packetPool: make(map[uint32][]byte), - } - ch.localId = m.chanList.add(ch) - return ch -} - -var errUndecided = errors.New("ssh: must Accept or Reject channel") -var errDecidedAlready = errors.New("ssh: can call Accept or Reject only once") - -type extChannel struct { - code uint32 - ch *channel -} - -func (e *extChannel) Write(data []byte) (n int, err error) { - return e.ch.WriteExtended(data, e.code) -} - -func (e *extChannel) Read(data []byte) (n int, err error) { - return e.ch.ReadExtended(data, e.code) -} - -func (c *channel) Accept() (Channel, <-chan *Request, error) { - if c.decided { - return nil, nil, errDecidedAlready - } - c.maxIncomingPayload = channelMaxPacket - confirm := channelOpenConfirmMsg{ - PeersId: c.remoteId, - MyId: c.localId, - MyWindow: c.myWindow, - MaxPacketSize: c.maxIncomingPayload, - } - c.decided = true - if err := c.sendMessage(confirm); err != nil { - return nil, nil, err - } - - return c, c.incomingRequests, nil -} - -func (ch *channel) Reject(reason RejectionReason, message string) error { - if ch.decided { - return errDecidedAlready - } - reject := channelOpenFailureMsg{ - PeersId: ch.remoteId, - Reason: reason, - Message: message, - Language: "en", - } - ch.decided = true - return ch.sendMessage(reject) -} - -func (ch *channel) Read(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.ReadExtended(data, 0) -} - -func (ch *channel) Write(data []byte) (int, error) { - if !ch.decided { - return 0, errUndecided - } - return ch.WriteExtended(data, 0) -} - -func (ch *channel) CloseWrite() error { - if !ch.decided { - return errUndecided - } - ch.sentEOF = true - return ch.sendMessage(channelEOFMsg{ - PeersId: ch.remoteId}) -} - -func (ch *channel) Close() error { - if !ch.decided { - return errUndecided - } - - return ch.sendMessage(channelCloseMsg{ - PeersId: ch.remoteId}) -} - -// Extended returns an io.ReadWriter that sends and receives data on the given, -// SSH extended stream. Such streams are used, for example, for stderr. 
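The Extended/Stderr streams described here, together with SendRequest (defined further below), are the raw building blocks that the higher-level Session.Run wraps. A client-side sketch, assuming client and command as inputs and the RFC 4254 section 6.5 encoding of the exec payload:

package example

import (
	"errors"
	"io"
	"os"

	"golang.org/x/crypto/ssh"
)

// runCommand opens a raw "session" channel, issues an "exec" request and
// streams stdout plus the stderr extended stream.
func runCommand(client *ssh.Client, command string) error {
	ch, requests, err := client.OpenChannel("session", nil)
	if err != nil {
		return err
	}
	defer ch.Close()
	go ssh.DiscardRequests(requests)

	ok, err := ch.SendRequest("exec", true, ssh.Marshal(&struct{ Command string }{command}))
	if err != nil {
		return err
	}
	if !ok {
		return errors.New("exec request rejected by server")
	}

	go io.Copy(os.Stderr, ch.Stderr()) // extended stream 1 carries stderr
	_, err = io.Copy(os.Stdout, ch)
	return err
}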
-func (ch *channel) Extended(code uint32) io.ReadWriter { - if !ch.decided { - return nil - } - return &extChannel{code, ch} -} - -func (ch *channel) Stderr() io.ReadWriter { - return ch.Extended(1) -} - -func (ch *channel) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - if !ch.decided { - return false, errUndecided - } - - if wantReply { - ch.sentRequestMu.Lock() - defer ch.sentRequestMu.Unlock() - } - - msg := channelRequestMsg{ - PeersId: ch.remoteId, - Request: name, - WantReply: wantReply, - RequestSpecificData: payload, - } - - if err := ch.sendMessage(msg); err != nil { - return false, err - } - - if wantReply { - m, ok := (<-ch.msg) - if !ok { - return false, io.EOF - } - switch m.(type) { - case *channelRequestFailureMsg: - return false, nil - case *channelRequestSuccessMsg: - return true, nil - default: - return false, fmt.Errorf("ssh: unexpected response to channel request: %#v", m) - } - } - - return false, nil -} - -// ackRequest either sends an ack or nack to the channel request. -func (ch *channel) ackRequest(ok bool) error { - if !ch.decided { - return errUndecided - } - - var msg interface{} - if !ok { - msg = channelRequestFailureMsg{ - PeersId: ch.remoteId, - } - } else { - msg = channelRequestSuccessMsg{ - PeersId: ch.remoteId, - } - } - return ch.sendMessage(msg) -} - -func (ch *channel) ChannelType() string { - return ch.chanType -} - -func (ch *channel) ExtraData() []byte { - return ch.extraData -} diff --git a/vendor/golang.org/x/crypto/ssh/cipher.go b/vendor/golang.org/x/crypto/ssh/cipher.go deleted file mode 100644 index 34d3917c4..000000000 --- a/vendor/golang.org/x/crypto/ssh/cipher.go +++ /dev/null @@ -1,579 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/aes" - "crypto/cipher" - "crypto/des" - "crypto/rc4" - "crypto/subtle" - "encoding/binary" - "errors" - "fmt" - "hash" - "io" - "io/ioutil" -) - -const ( - packetSizeMultiple = 16 // TODO(huin) this should be determined by the cipher. - - // RFC 4253 section 6.1 defines a minimum packet size of 32768 that implementations - // MUST be able to process (plus a few more kilobytes for padding and mac). The RFC - // indicates implementations SHOULD be able to handle larger packet sizes, but then - // waffles on about reasonable limits. - // - // OpenSSH caps their maxPacket at 256kB so we choose to do - // the same. maxPacket is also used to ensure that uint32 - // length fields do not overflow, so it should remain well - // below 4G. - maxPacket = 256 * 1024 -) - -// noneCipher implements cipher.Stream and provides no encryption. It is used -// by the transport before the first key-exchange. 
-type noneCipher struct{} - -func (c noneCipher) XORKeyStream(dst, src []byte) { - copy(dst, src) -} - -func newAESCTR(key, iv []byte) (cipher.Stream, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - return cipher.NewCTR(c, iv), nil -} - -func newRC4(key, iv []byte) (cipher.Stream, error) { - return rc4.NewCipher(key) -} - -type streamCipherMode struct { - keySize int - ivSize int - skip int - createFunc func(key, iv []byte) (cipher.Stream, error) -} - -func (c *streamCipherMode) createStream(key, iv []byte) (cipher.Stream, error) { - if len(key) < c.keySize { - panic("ssh: key length too small for cipher") - } - if len(iv) < c.ivSize { - panic("ssh: iv too small for cipher") - } - - stream, err := c.createFunc(key[:c.keySize], iv[:c.ivSize]) - if err != nil { - return nil, err - } - - var streamDump []byte - if c.skip > 0 { - streamDump = make([]byte, 512) - } - - for remainingToDump := c.skip; remainingToDump > 0; { - dumpThisTime := remainingToDump - if dumpThisTime > len(streamDump) { - dumpThisTime = len(streamDump) - } - stream.XORKeyStream(streamDump[:dumpThisTime], streamDump[:dumpThisTime]) - remainingToDump -= dumpThisTime - } - - return stream, nil -} - -// cipherModes documents properties of supported ciphers. Ciphers not included -// are not supported and will not be negotiated, even if explicitly requested in -// ClientConfig.Crypto.Ciphers. -var cipherModes = map[string]*streamCipherMode{ - // Ciphers from RFC4344, which introduced many CTR-based ciphers. Algorithms - // are defined in the order specified in the RFC. - "aes128-ctr": {16, aes.BlockSize, 0, newAESCTR}, - "aes192-ctr": {24, aes.BlockSize, 0, newAESCTR}, - "aes256-ctr": {32, aes.BlockSize, 0, newAESCTR}, - - // Ciphers from RFC4345, which introduces security-improved arcfour ciphers. - // They are defined in the order specified in the RFC. - "arcfour128": {16, 0, 1536, newRC4}, - "arcfour256": {32, 0, 1536, newRC4}, - - // Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol. - // Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and - // RC4) has problems with weak keys, and should be used with caution." - // RFC4345 introduces improved versions of Arcfour. - "arcfour": {16, 0, 0, newRC4}, - - // AES-GCM is not a stream cipher, so it is constructed with a - // special case. If we add any more non-stream ciphers, we - // should invest a cleaner way to do this. - gcmCipherID: {16, 12, 0, nil}, - - // CBC mode is insecure and so is not included in the default config. - // (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely - // needed, it's possible to specify a custom Config to enable it. - // You should expect that an active attacker can recover plaintext if - // you do. - aes128cbcID: {16, aes.BlockSize, 0, nil}, - - // 3des-cbc is insecure and is disabled by default. - tripledescbcID: {24, des.BlockSize, 0, nil}, -} - -// prefixLen is the length of the packet prefix that contains the packet length -// and number of padding bytes. -const prefixLen = 5 - -// streamPacketCipher is a packetCipher using a stream cipher. -type streamPacketCipher struct { - mac hash.Hash - cipher cipher.Stream - - // The following members are to avoid per-packet allocations. - prefix [prefixLen]byte - seqNumBytes [4]byte - padding [2 * packetSizeMultiple]byte - packetData []byte - macResult []byte -} - -// readPacket reads and decrypt a single packet from the reader argument. 
-func (s *streamPacketCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, s.prefix[:]); err != nil { - return nil, err - } - - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - length := binary.BigEndian.Uint32(s.prefix[0:4]) - paddingLength := uint32(s.prefix[4]) - - var macSize uint32 - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - s.mac.Write(s.prefix[:]) - macSize = uint32(s.mac.Size()) - } - - if length <= paddingLength+1 { - return nil, errors.New("ssh: invalid packet length, packet too small") - } - - if length > maxPacket { - return nil, errors.New("ssh: invalid packet length, packet too large") - } - - // the maxPacket check above ensures that length-1+macSize - // does not overflow. - if uint32(cap(s.packetData)) < length-1+macSize { - s.packetData = make([]byte, length-1+macSize) - } else { - s.packetData = s.packetData[:length-1+macSize] - } - - if _, err := io.ReadFull(r, s.packetData); err != nil { - return nil, err - } - mac := s.packetData[length-1:] - data := s.packetData[:length-1] - s.cipher.XORKeyStream(data, data) - - if s.mac != nil { - s.mac.Write(data) - s.macResult = s.mac.Sum(s.macResult[:0]) - if subtle.ConstantTimeCompare(s.macResult, mac) != 1 { - return nil, errors.New("ssh: MAC failure") - } - } - - return s.packetData[:length-paddingLength-1], nil -} - -// writePacket encrypts and sends a packet of data to the writer argument -func (s *streamPacketCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - if len(packet) > maxPacket { - return errors.New("ssh: packet too large") - } - - paddingLength := packetSizeMultiple - (prefixLen+len(packet))%packetSizeMultiple - if paddingLength < 4 { - paddingLength += packetSizeMultiple - } - - length := len(packet) + 1 + paddingLength - binary.BigEndian.PutUint32(s.prefix[:], uint32(length)) - s.prefix[4] = byte(paddingLength) - padding := s.padding[:paddingLength] - if _, err := io.ReadFull(rand, padding); err != nil { - return err - } - - if s.mac != nil { - s.mac.Reset() - binary.BigEndian.PutUint32(s.seqNumBytes[:], seqNum) - s.mac.Write(s.seqNumBytes[:]) - s.mac.Write(s.prefix[:]) - s.mac.Write(packet) - s.mac.Write(padding) - } - - s.cipher.XORKeyStream(s.prefix[:], s.prefix[:]) - s.cipher.XORKeyStream(packet, packet) - s.cipher.XORKeyStream(padding, padding) - - if _, err := w.Write(s.prefix[:]); err != nil { - return err - } - if _, err := w.Write(packet); err != nil { - return err - } - if _, err := w.Write(padding); err != nil { - return err - } - - if s.mac != nil { - s.macResult = s.mac.Sum(s.macResult[:0]) - if _, err := w.Write(s.macResult); err != nil { - return err - } - } - - return nil -} - -type gcmCipher struct { - aead cipher.AEAD - prefix [4]byte - iv []byte - buf []byte -} - -func newGCMCipher(iv, key, macKey []byte) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - aead, err := cipher.NewGCM(c) - if err != nil { - return nil, err - } - - return &gcmCipher{ - aead: aead, - iv: iv, - }, nil -} - -const gcmTagSize = 16 - -func (c *gcmCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - // Pad out to multiple of 16 bytes. This is different from the - // stream cipher because that encrypts the length too. 
- padding := byte(packetSizeMultiple - (1+len(packet))%packetSizeMultiple) - if padding < 4 { - padding += packetSizeMultiple - } - - length := uint32(len(packet) + int(padding) + 1) - binary.BigEndian.PutUint32(c.prefix[:], length) - if _, err := w.Write(c.prefix[:]); err != nil { - return err - } - - if cap(c.buf) < int(length) { - c.buf = make([]byte, length) - } else { - c.buf = c.buf[:length] - } - - c.buf[0] = padding - copy(c.buf[1:], packet) - if _, err := io.ReadFull(rand, c.buf[1+len(packet):]); err != nil { - return err - } - c.buf = c.aead.Seal(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if _, err := w.Write(c.buf); err != nil { - return err - } - c.incIV() - - return nil -} - -func (c *gcmCipher) incIV() { - for i := 4 + 7; i >= 4; i-- { - c.iv[i]++ - if c.iv[i] != 0 { - break - } - } -} - -func (c *gcmCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - if _, err := io.ReadFull(r, c.prefix[:]); err != nil { - return nil, err - } - length := binary.BigEndian.Uint32(c.prefix[:]) - if length > maxPacket { - return nil, errors.New("ssh: max packet length exceeded.") - } - - if cap(c.buf) < int(length+gcmTagSize) { - c.buf = make([]byte, length+gcmTagSize) - } else { - c.buf = c.buf[:length+gcmTagSize] - } - - if _, err := io.ReadFull(r, c.buf); err != nil { - return nil, err - } - - plain, err := c.aead.Open(c.buf[:0], c.iv, c.buf, c.prefix[:]) - if err != nil { - return nil, err - } - c.incIV() - - padding := plain[0] - if padding < 4 || padding >= 20 { - return nil, fmt.Errorf("ssh: illegal padding %d", padding) - } - - if int(padding+1) >= len(plain) { - return nil, fmt.Errorf("ssh: padding %d too large", padding) - } - plain = plain[1 : length-uint32(padding)] - return plain, nil -} - -// cbcCipher implements aes128-cbc cipher defined in RFC 4253 section 6.1 -type cbcCipher struct { - mac hash.Hash - macSize uint32 - decrypter cipher.BlockMode - encrypter cipher.BlockMode - - // The following members are to avoid per-packet allocations. - seqNumBytes [4]byte - packetData []byte - macResult []byte - - // Amount of data we should still read to hide which - // verification error triggered. - oracleCamouflage uint32 -} - -func newCBCCipher(c cipher.Block, iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - cbc := &cbcCipher{ - mac: macModes[algs.MAC].new(macKey), - decrypter: cipher.NewCBCDecrypter(c, iv), - encrypter: cipher.NewCBCEncrypter(c, iv), - packetData: make([]byte, 1024), - } - if cbc.mac != nil { - cbc.macSize = uint32(cbc.mac.Size()) - } - - return cbc, nil -} - -func newAESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := aes.NewCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, iv, key, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func newTripleDESCBCCipher(iv, key, macKey []byte, algs directionAlgorithms) (packetCipher, error) { - c, err := des.NewTripleDESCipher(key) - if err != nil { - return nil, err - } - - cbc, err := newCBCCipher(c, iv, key, macKey, algs) - if err != nil { - return nil, err - } - - return cbc, nil -} - -func maxUInt32(a, b int) uint32 { - if a > b { - return uint32(a) - } - return uint32(b) -} - -const ( - cbcMinPacketSizeMultiple = 8 - cbcMinPacketSize = 16 - cbcMinPaddingSize = 4 -) - -// cbcError represents a verification error that may leak information. 
-type cbcError string - -func (e cbcError) Error() string { return string(e) } - -func (c *cbcCipher) readPacket(seqNum uint32, r io.Reader) ([]byte, error) { - p, err := c.readPacketLeaky(seqNum, r) - if err != nil { - if _, ok := err.(cbcError); ok { - // Verification error: read a fixed amount of - // data, to make distinguishing between - // failing MAC and failing length check more - // difficult. - io.CopyN(ioutil.Discard, r, int64(c.oracleCamouflage)) - } - } - return p, err -} - -func (c *cbcCipher) readPacketLeaky(seqNum uint32, r io.Reader) ([]byte, error) { - blockSize := c.decrypter.BlockSize() - - // Read the header, which will include some of the subsequent data in the - // case of block ciphers - this is copied back to the payload later. - // How many bytes of payload/padding will be read with this first read. - firstBlockLength := uint32((prefixLen + blockSize - 1) / blockSize * blockSize) - firstBlock := c.packetData[:firstBlockLength] - if _, err := io.ReadFull(r, firstBlock); err != nil { - return nil, err - } - - c.oracleCamouflage = maxPacket + 4 + c.macSize - firstBlockLength - - c.decrypter.CryptBlocks(firstBlock, firstBlock) - length := binary.BigEndian.Uint32(firstBlock[:4]) - if length > maxPacket { - return nil, cbcError("ssh: packet too large") - } - if length+4 < maxUInt32(cbcMinPacketSize, blockSize) { - // The minimum size of a packet is 16 (or the cipher block size, whichever - // is larger) bytes. - return nil, cbcError("ssh: packet too small") - } - // The length of the packet (including the length field but not the MAC) must - // be a multiple of the block size or 8, whichever is larger. - if (length+4)%maxUInt32(cbcMinPacketSizeMultiple, blockSize) != 0 { - return nil, cbcError("ssh: invalid packet length multiple") - } - - paddingLength := uint32(firstBlock[4]) - if paddingLength < cbcMinPaddingSize || length <= paddingLength+1 { - return nil, cbcError("ssh: invalid packet length") - } - - // Positions within the c.packetData buffer: - macStart := 4 + length - paddingStart := macStart - paddingLength - - // Entire packet size, starting before length, ending at end of mac. - entirePacketSize := macStart + c.macSize - - // Ensure c.packetData is large enough for the entire packet data. - if uint32(cap(c.packetData)) < entirePacketSize { - // Still need to upsize and copy, but this should be rare at runtime, only - // on upsizing the packetData buffer. - c.packetData = make([]byte, entirePacketSize) - copy(c.packetData, firstBlock) - } else { - c.packetData = c.packetData[:entirePacketSize] - } - - if n, err := io.ReadFull(r, c.packetData[firstBlockLength:]); err != nil { - return nil, err - } else { - c.oracleCamouflage -= uint32(n) - } - - remainingCrypted := c.packetData[firstBlockLength:macStart] - c.decrypter.CryptBlocks(remainingCrypted, remainingCrypted) - - mac := c.packetData[macStart:] - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData[:macStart]) - c.macResult = c.mac.Sum(c.macResult[:0]) - if subtle.ConstantTimeCompare(c.macResult, mac) != 1 { - return nil, cbcError("ssh: MAC failure") - } - } - - return c.packetData[prefixLen:paddingStart], nil -} - -func (c *cbcCipher) writePacket(seqNum uint32, w io.Writer, rand io.Reader, packet []byte) error { - effectiveBlockSize := maxUInt32(cbcMinPacketSizeMultiple, c.encrypter.BlockSize()) - - // Length of encrypted portion of the packet (header, payload, padding). 
- // Enforce minimum padding and packet size. - encLength := maxUInt32(prefixLen+len(packet)+cbcMinPaddingSize, cbcMinPaddingSize) - // Enforce block size. - encLength = (encLength + effectiveBlockSize - 1) / effectiveBlockSize * effectiveBlockSize - - length := encLength - 4 - paddingLength := int(length) - (1 + len(packet)) - - // Overall buffer contains: header, payload, padding, mac. - // Space for the MAC is reserved in the capacity but not the slice length. - bufferSize := encLength + c.macSize - if uint32(cap(c.packetData)) < bufferSize { - c.packetData = make([]byte, encLength, bufferSize) - } else { - c.packetData = c.packetData[:encLength] - } - - p := c.packetData - - // Packet header. - binary.BigEndian.PutUint32(p, length) - p = p[4:] - p[0] = byte(paddingLength) - - // Payload. - p = p[1:] - copy(p, packet) - - // Padding. - p = p[len(packet):] - if _, err := io.ReadFull(rand, p); err != nil { - return err - } - - if c.mac != nil { - c.mac.Reset() - binary.BigEndian.PutUint32(c.seqNumBytes[:], seqNum) - c.mac.Write(c.seqNumBytes[:]) - c.mac.Write(c.packetData) - // The MAC is now appended into the capacity reserved for it earlier. - c.packetData = c.mac.Sum(c.packetData) - } - - c.encrypter.CryptBlocks(c.packetData[:encLength], c.packetData[:encLength]) - - if _, err := w.Write(c.packetData); err != nil { - return err - } - - return nil -} diff --git a/vendor/golang.org/x/crypto/ssh/client.go b/vendor/golang.org/x/crypto/ssh/client.go deleted file mode 100644 index 0212a20c9..000000000 --- a/vendor/golang.org/x/crypto/ssh/client.go +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "net" - "sync" - "time" -) - -// Client implements a traditional SSH client that supports shells, -// subprocesses, port forwarding and tunneled dialing. -type Client struct { - Conn - - forwards forwardList // forwarded tcpip connections from the remote side - mu sync.Mutex - channelHandlers map[string]chan NewChannel -} - -// HandleChannelOpen returns a channel on which NewChannel requests -// for the given type are sent. If the type already is being handled, -// nil is returned. The channel is closed when the connection is closed. -func (c *Client) HandleChannelOpen(channelType string) <-chan NewChannel { - c.mu.Lock() - defer c.mu.Unlock() - if c.channelHandlers == nil { - // The SSH channel has been closed. - c := make(chan NewChannel) - close(c) - return c - } - - ch := c.channelHandlers[channelType] - if ch != nil { - return nil - } - - ch = make(chan NewChannel, 16) - c.channelHandlers[channelType] = ch - return ch -} - -// NewClient creates a Client on top of the given connection. -func NewClient(c Conn, chans <-chan NewChannel, reqs <-chan *Request) *Client { - conn := &Client{ - Conn: c, - channelHandlers: make(map[string]chan NewChannel, 1), - } - - go conn.handleGlobalRequests(reqs) - go conn.handleChannelOpens(chans) - go func() { - conn.Wait() - conn.forwards.closeAll() - }() - go conn.forwards.handleChannels(conn.HandleChannelOpen("forwarded-tcpip")) - return conn -} - -// NewClientConn establishes an authenticated SSH connection using c -// as the underlying transport. The Request and NewChannel channels -// must be serviced or the connection will hang. 
-func NewClientConn(c net.Conn, addr string, config *ClientConfig) (Conn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - conn := &connection{ - sshConn: sshConn{conn: c}, - } - - if err := conn.clientHandshake(addr, &fullConf); err != nil { - c.Close() - return nil, nil, nil, fmt.Errorf("ssh: handshake failed: %v", err) - } - conn.mux = newMux(conn.transport) - return conn, conn.mux.incomingChannels, conn.mux.incomingRequests, nil -} - -// clientHandshake performs the client side key exchange. See RFC 4253 Section -// 7. -func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) error { - if config.ClientVersion != "" { - c.clientVersion = []byte(config.ClientVersion) - } else { - c.clientVersion = []byte(packageVersion) - } - var err error - c.serverVersion, err = exchangeVersions(c.sshConn.conn, c.clientVersion) - if err != nil { - return err - } - - c.transport = newClientTransport( - newTransport(c.sshConn.conn, config.Rand, true /* is client */), - c.clientVersion, c.serverVersion, config, dialAddress, c.sshConn.RemoteAddr()) - if err := c.transport.requestInitialKeyChange(); err != nil { - return err - } - - // We just did the key change, so the session ID is established. - c.sessionID = c.transport.getSessionID() - - return c.clientAuthenticate(config) -} - -// verifyHostKeySignature verifies the host key obtained in the key -// exchange. -func verifyHostKeySignature(hostKey PublicKey, result *kexResult) error { - sig, rest, ok := parseSignatureBody(result.Signature) - if len(rest) > 0 || !ok { - return errors.New("ssh: signature parse error") - } - - return hostKey.Verify(result.H, sig) -} - -// NewSession opens a new Session for this client. (A session is a remote -// execution of a program.) -func (c *Client) NewSession() (*Session, error) { - ch, in, err := c.OpenChannel("session", nil) - if err != nil { - return nil, err - } - return newSession(ch, in) -} - -func (c *Client) handleGlobalRequests(incoming <-chan *Request) { - for r := range incoming { - // This handles keepalive messages and matches - // the behaviour of OpenSSH. - r.Reply(false, nil) - } -} - -// handleChannelOpens channel open messages from the remote side. -func (c *Client) handleChannelOpens(in <-chan NewChannel) { - for ch := range in { - c.mu.Lock() - handler := c.channelHandlers[ch.ChannelType()] - c.mu.Unlock() - - if handler != nil { - handler <- ch - } else { - ch.Reject(UnknownChannelType, fmt.Sprintf("unknown channel type: %v", ch.ChannelType())) - } - } - - c.mu.Lock() - for _, ch := range c.channelHandlers { - close(ch) - } - c.channelHandlers = nil - c.mu.Unlock() -} - -// Dial starts a client connection to the given SSH server. It is a -// convenience function that connects to the given network address, -// initiates the SSH handshake, and then sets up a Client. For access -// to incoming channels and requests, use net.Dial with NewClientConn -// instead. -func Dial(network, addr string, config *ClientConfig) (*Client, error) { - conn, err := net.DialTimeout(network, addr, config.Timeout) - if err != nil { - return nil, err - } - c, chans, reqs, err := NewClientConn(conn, addr, config) - if err != nil { - return nil, err - } - return NewClient(c, chans, reqs), nil -} - -// A ClientConfig structure is used to configure a Client. It must not be -// modified after having been passed to an SSH function. -type ClientConfig struct { - // Config contains configuration that is shared between clients and - // servers. 
- Config - - // User contains the username to authenticate as. - User string - - // Auth contains possible authentication methods to use with the - // server. Only the first instance of a particular RFC 4252 method will - // be used during authentication. - Auth []AuthMethod - - // HostKeyCallback, if not nil, is called during the cryptographic - // handshake to validate the server's host key. A nil HostKeyCallback - // implies that all host keys are accepted. - HostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - - // ClientVersion contains the version identification string that will - // be used for the connection. If empty, a reasonable default is used. - ClientVersion string - - // HostKeyAlgorithms lists the key types that the client will - // accept from the server as host key, in order of - // preference. If empty, a reasonable default is used. Any - // string returned from PublicKey.Type method may be used, or - // any of the CertAlgoXxxx and KeyAlgoXxxx constants. - HostKeyAlgorithms []string - - // Timeout is the maximum amount of time for the TCP connection to establish. - // - // A Timeout of zero means no timeout. - Timeout time.Duration -} diff --git a/vendor/golang.org/x/crypto/ssh/client_auth.go b/vendor/golang.org/x/crypto/ssh/client_auth.go deleted file mode 100644 index 294af0d48..000000000 --- a/vendor/golang.org/x/crypto/ssh/client_auth.go +++ /dev/null @@ -1,473 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" -) - -// clientAuthenticate authenticates with the remote server. See RFC 4252. -func (c *connection) clientAuthenticate(config *ClientConfig) error { - // initiate user auth session - if err := c.transport.writePacket(Marshal(&serviceRequestMsg{serviceUserAuth})); err != nil { - return err - } - packet, err := c.transport.readPacket() - if err != nil { - return err - } - var serviceAccept serviceAcceptMsg - if err := Unmarshal(packet, &serviceAccept); err != nil { - return err - } - - // during the authentication phase the client first attempts the "none" method - // then any untried methods suggested by the server. - tried := make(map[string]bool) - var lastMethods []string - for auth := AuthMethod(new(noneAuth)); auth != nil; { - ok, methods, err := auth.auth(c.transport.getSessionID(), config.User, c.transport, config.Rand) - if err != nil { - return err - } - if ok { - // success - return nil - } - tried[auth.method()] = true - if methods == nil { - methods = lastMethods - } - lastMethods = methods - - auth = nil - - findNext: - for _, a := range config.Auth { - candidateMethod := a.method() - if tried[candidateMethod] { - continue - } - for _, meth := range methods { - if meth == candidateMethod { - auth = a - break findNext - } - } - } - } - return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried)) -} - -func keys(m map[string]bool) []string { - s := make([]string, 0, len(m)) - - for key := range m { - s = append(s, key) - } - return s -} - -// An AuthMethod represents an instance of an RFC 4252 authentication method. -type AuthMethod interface { - // auth authenticates user over transport t. - // Returns true if authentication is successful. - // If authentication is not successful, a []string of alternative - // method names is returned. 
If the slice is nil, it will be ignored - // and the previous set of possible methods will be reused. - auth(session []byte, user string, p packetConn, rand io.Reader) (bool, []string, error) - - // method returns the RFC 4252 method name. - method() string -} - -// "none" authentication, RFC 4252 section 5.2. -type noneAuth int - -func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - if err := c.writePacket(Marshal(&userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: "none", - })); err != nil { - return false, nil, err - } - - return handleAuthResponse(c) -} - -func (n *noneAuth) method() string { - return "none" -} - -// passwordCallback is an AuthMethod that fetches the password through -// a function call, e.g. by prompting the user. -type passwordCallback func() (password string, err error) - -func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - type passwordAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - Reply bool - Password string - } - - pw, err := cb() - // REVIEW NOTE: is there a need to support skipping a password attempt? - // The program may only find out that the user doesn't have a password - // when prompting. - if err != nil { - return false, nil, err - } - - if err := c.writePacket(Marshal(&passwordAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - Reply: false, - Password: pw, - })); err != nil { - return false, nil, err - } - - return handleAuthResponse(c) -} - -func (cb passwordCallback) method() string { - return "password" -} - -// Password returns an AuthMethod using the given password. -func Password(secret string) AuthMethod { - return passwordCallback(func() (string, error) { return secret, nil }) -} - -// PasswordCallback returns an AuthMethod that uses a callback for -// fetching a password. -func PasswordCallback(prompt func() (secret string, err error)) AuthMethod { - return passwordCallback(prompt) -} - -type publickeyAuthMsg struct { - User string `sshtype:"50"` - Service string - Method string - // HasSig indicates to the receiver packet that the auth request is signed and - // should be used for authentication of the request. - HasSig bool - Algoname string - PubKey []byte - // Sig is tagged with "rest" so Marshal will exclude it during - // validateKey - Sig []byte `ssh:"rest"` -} - -// publicKeyCallback is an AuthMethod that uses a set of key -// pairs for authentication. -type publicKeyCallback func() ([]Signer, error) - -func (cb publicKeyCallback) method() string { - return "publickey" -} - -func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - // Authentication is performed in two stages. The first stage sends an - // enquiry to test if each key is acceptable to the remote. The second - // stage attempts to authenticate with the valid keys obtained in the - // first stage. - - signers, err := cb() - if err != nil { - return false, nil, err - } - var validKeys []Signer - for _, signer := range signers { - if ok, err := validateKey(signer.PublicKey(), user, c); ok { - validKeys = append(validKeys, signer) - } else { - if err != nil { - return false, nil, err - } - } - } - - // methods that may continue if this auth is not successful. 
- var methods []string - for _, signer := range validKeys { - pub := signer.PublicKey() - - pubKey := pub.Marshal() - sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - }, []byte(pub.Type()), pubKey)) - if err != nil { - return false, nil, err - } - - // manually wrap the serialized signature in a string - s := Marshal(sign) - sig := make([]byte, stringLength(len(s))) - marshalString(sig, s) - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: cb.method(), - HasSig: true, - Algoname: pub.Type(), - PubKey: pubKey, - Sig: sig, - } - p := Marshal(&msg) - if err := c.writePacket(p); err != nil { - return false, nil, err - } - var success bool - success, methods, err = handleAuthResponse(c) - if err != nil { - return false, nil, err - } - if success { - return success, methods, err - } - } - return false, methods, nil -} - -// validateKey validates the key provided is acceptable to the server. -func validateKey(key PublicKey, user string, c packetConn) (bool, error) { - pubKey := key.Marshal() - msg := publickeyAuthMsg{ - User: user, - Service: serviceSSH, - Method: "publickey", - HasSig: false, - Algoname: key.Type(), - PubKey: pubKey, - } - if err := c.writePacket(Marshal(&msg)); err != nil { - return false, err - } - - return confirmKeyAck(key, c) -} - -func confirmKeyAck(key PublicKey, c packetConn) (bool, error) { - pubKey := key.Marshal() - algoname := key.Type() - - for { - packet, err := c.readPacket() - if err != nil { - return false, err - } - switch packet[0] { - case msgUserAuthBanner: - // TODO(gpaul): add callback to present the banner to the user - case msgUserAuthPubKeyOk: - var msg userAuthPubKeyOkMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, err - } - if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) { - return false, nil - } - return true, nil - case msgUserAuthFailure: - return false, nil - default: - return false, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// PublicKeys returns an AuthMethod that uses the given key -// pairs. -func PublicKeys(signers ...Signer) AuthMethod { - return publicKeyCallback(func() ([]Signer, error) { return signers, nil }) -} - -// PublicKeysCallback returns an AuthMethod that runs the given -// function to obtain a list of key pairs. -func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMethod { - return publicKeyCallback(getSigners) -} - -// handleAuthResponse returns whether the preceding authentication request succeeded -// along with a list of remaining authentication methods to try next and -// an error if an unexpected response was received. -func handleAuthResponse(c packetConn) (bool, []string, error) { - for { - packet, err := c.readPacket() - if err != nil { - return false, nil, err - } - - switch packet[0] { - case msgUserAuthBanner: - // TODO: add callback to present the banner to the user - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, nil, err - } - return false, msg.Methods, nil - case msgUserAuthSuccess: - return true, nil, nil - default: - return false, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0]) - } - } -} - -// KeyboardInteractiveChallenge should print questions, optionally -// disabling echoing (e.g. for passwords), and return all the answers. -// Challenge may be called multiple times in a single session. 
After -// successful authentication, the server may send a challenge with no -// questions, for which the user and instruction messages should be -// printed. RFC 4256 section 3.3 details how the UI should behave for -// both CLI and GUI environments. -type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error) - -// KeyboardInteractive returns a AuthMethod using a prompt/response -// sequence controlled by the server. -func KeyboardInteractive(challenge KeyboardInteractiveChallenge) AuthMethod { - return challenge -} - -func (cb KeyboardInteractiveChallenge) method() string { - return "keyboard-interactive" -} - -func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (bool, []string, error) { - type initiateMsg struct { - User string `sshtype:"50"` - Service string - Method string - Language string - Submethods string - } - - if err := c.writePacket(Marshal(&initiateMsg{ - User: user, - Service: serviceSSH, - Method: "keyboard-interactive", - })); err != nil { - return false, nil, err - } - - for { - packet, err := c.readPacket() - if err != nil { - return false, nil, err - } - - // like handleAuthResponse, but with less options. - switch packet[0] { - case msgUserAuthBanner: - // TODO: Print banners during userauth. - continue - case msgUserAuthInfoRequest: - // OK - case msgUserAuthFailure: - var msg userAuthFailureMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, nil, err - } - return false, msg.Methods, nil - case msgUserAuthSuccess: - return true, nil, nil - default: - return false, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0]) - } - - var msg userAuthInfoRequestMsg - if err := Unmarshal(packet, &msg); err != nil { - return false, nil, err - } - - // Manually unpack the prompt/echo pairs. 
- rest := msg.Prompts - var prompts []string - var echos []bool - for i := 0; i < int(msg.NumPrompts); i++ { - prompt, r, ok := parseString(rest) - if !ok || len(r) == 0 { - return false, nil, errors.New("ssh: prompt format error") - } - prompts = append(prompts, string(prompt)) - echos = append(echos, r[0] != 0) - rest = r[1:] - } - - if len(rest) != 0 { - return false, nil, errors.New("ssh: extra data following keyboard-interactive pairs") - } - - answers, err := cb(msg.User, msg.Instruction, prompts, echos) - if err != nil { - return false, nil, err - } - - if len(answers) != len(prompts) { - return false, nil, errors.New("ssh: not enough answers from keyboard-interactive callback") - } - responseLength := 1 + 4 - for _, a := range answers { - responseLength += stringLength(len(a)) - } - serialized := make([]byte, responseLength) - p := serialized - p[0] = msgUserAuthInfoResponse - p = p[1:] - p = marshalUint32(p, uint32(len(answers))) - for _, a := range answers { - p = marshalString(p, []byte(a)) - } - - if err := c.writePacket(serialized); err != nil { - return false, nil, err - } - } -} - -type retryableAuthMethod struct { - authMethod AuthMethod - maxTries int -} - -func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok bool, methods []string, err error) { - for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ { - ok, methods, err = r.authMethod.auth(session, user, c, rand) - if ok || err != nil { // either success or error terminate - return ok, methods, err - } - } - return ok, methods, err -} - -func (r *retryableAuthMethod) method() string { - return r.authMethod.method() -} - -// RetryableAuthMethod is a decorator for other auth methods enabling them to -// be retried up to maxTries before considering that AuthMethod itself failed. -// If maxTries is <= 0, will retry indefinitely -// -// This is useful for interactive clients using challenge/response type -// authentication (e.g. Keyboard-Interactive, Password, etc) where the user -// could mistype their response resulting in the server issuing a -// SSH_MSG_USERAUTH_FAILURE (rfc4252 #8 [password] and rfc4256 #3.4 -// [keyboard-interactive]); Without this decorator, the non-retryable -// AuthMethod would be removed from future consideration, and never tried again -// (and so the user would never be able to retry their entry). -func RetryableAuthMethod(auth AuthMethod, maxTries int) AuthMethod { - return &retryableAuthMethod{authMethod: auth, maxTries: maxTries} -} diff --git a/vendor/golang.org/x/crypto/ssh/common.go b/vendor/golang.org/x/crypto/ssh/common.go deleted file mode 100644 index 2c72ab544..000000000 --- a/vendor/golang.org/x/crypto/ssh/common.go +++ /dev/null @@ -1,356 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/rand" - "fmt" - "io" - "sync" - - _ "crypto/sha1" - _ "crypto/sha256" - _ "crypto/sha512" -) - -// These are string constants in the SSH protocol. -const ( - compressionNone = "none" - serviceUserAuth = "ssh-userauth" - serviceSSH = "ssh-connection" -) - -// supportedCiphers specifies the supported ciphers in preference order. -var supportedCiphers = []string{ - "aes128-ctr", "aes192-ctr", "aes256-ctr", - "aes128-gcm@openssh.com", - "arcfour256", "arcfour128", -} - -// supportedKexAlgos specifies the supported key-exchange algorithms in -// preference order. 
-var supportedKexAlgos = []string{ - kexAlgoCurve25519SHA256, - // P384 and P521 are not constant-time yet, but since we don't - // reuse ephemeral keys, using them for ECDH should be OK. - kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521, - kexAlgoDH14SHA1, kexAlgoDH1SHA1, -} - -// supportedKexAlgos specifies the supported host-key algorithms (i.e. methods -// of authenticating servers) in preference order. -var supportedHostKeyAlgos = []string{ - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, - CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01, - - KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, - KeyAlgoRSA, KeyAlgoDSA, - - KeyAlgoED25519, -} - -// supportedMACs specifies a default set of MAC algorithms in preference order. -// This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed -// because they have reached the end of their useful life. -var supportedMACs = []string{ - "hmac-sha2-256", "hmac-sha1", "hmac-sha1-96", -} - -var supportedCompressions = []string{compressionNone} - -// hashFuncs keeps the mapping of supported algorithms to their respective -// hashes needed for signature verification. -var hashFuncs = map[string]crypto.Hash{ - KeyAlgoRSA: crypto.SHA1, - KeyAlgoDSA: crypto.SHA1, - KeyAlgoECDSA256: crypto.SHA256, - KeyAlgoECDSA384: crypto.SHA384, - KeyAlgoECDSA521: crypto.SHA512, - CertAlgoRSAv01: crypto.SHA1, - CertAlgoDSAv01: crypto.SHA1, - CertAlgoECDSA256v01: crypto.SHA256, - CertAlgoECDSA384v01: crypto.SHA384, - CertAlgoECDSA521v01: crypto.SHA512, -} - -// unexpectedMessageError results when the SSH message that we received didn't -// match what we wanted. -func unexpectedMessageError(expected, got uint8) error { - return fmt.Errorf("ssh: unexpected message type %d (expected %d)", got, expected) -} - -// parseError results from a malformed SSH message. 
-func parseError(tag uint8) error { - return fmt.Errorf("ssh: parse error in message type %d", tag) -} - -func findCommon(what string, client []string, server []string) (common string, err error) { - for _, c := range client { - for _, s := range server { - if c == s { - return c, nil - } - } - } - return "", fmt.Errorf("ssh: no common algorithm for %s; client offered: %v, server offered: %v", what, client, server) -} - -type directionAlgorithms struct { - Cipher string - MAC string - Compression string -} - -type algorithms struct { - kex string - hostKey string - w directionAlgorithms - r directionAlgorithms -} - -func findAgreedAlgorithms(clientKexInit, serverKexInit *kexInitMsg) (algs *algorithms, err error) { - result := &algorithms{} - - result.kex, err = findCommon("key exchange", clientKexInit.KexAlgos, serverKexInit.KexAlgos) - if err != nil { - return - } - - result.hostKey, err = findCommon("host key", clientKexInit.ServerHostKeyAlgos, serverKexInit.ServerHostKeyAlgos) - if err != nil { - return - } - - result.w.Cipher, err = findCommon("client to server cipher", clientKexInit.CiphersClientServer, serverKexInit.CiphersClientServer) - if err != nil { - return - } - - result.r.Cipher, err = findCommon("server to client cipher", clientKexInit.CiphersServerClient, serverKexInit.CiphersServerClient) - if err != nil { - return - } - - result.w.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer) - if err != nil { - return - } - - result.r.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient) - if err != nil { - return - } - - result.w.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer) - if err != nil { - return - } - - result.r.Compression, err = findCommon("server to client compression", clientKexInit.CompressionServerClient, serverKexInit.CompressionServerClient) - if err != nil { - return - } - - return result, nil -} - -// If rekeythreshold is too small, we can't make any progress sending -// stuff. -const minRekeyThreshold uint64 = 256 - -// Config contains configuration data common to both ServerConfig and -// ClientConfig. -type Config struct { - // Rand provides the source of entropy for cryptographic - // primitives. If Rand is nil, the cryptographic random reader - // in package crypto/rand will be used. - Rand io.Reader - - // The maximum number of bytes sent or received after which a - // new key is negotiated. It must be at least 256. If - // unspecified, 1 gigabyte is used. - RekeyThreshold uint64 - - // The allowed key exchanges algorithms. If unspecified then a - // default set of algorithms is used. - KeyExchanges []string - - // The allowed cipher algorithms. If unspecified then a sensible - // default is used. - Ciphers []string - - // The allowed MAC algorithms. If unspecified then a sensible default - // is used. - MACs []string -} - -// SetDefaults sets sensible values for unset fields in config. This is -// exported for testing: Configs passed to SSH functions are copied and have -// default values set automatically. 
-func (c *Config) SetDefaults() { - if c.Rand == nil { - c.Rand = rand.Reader - } - if c.Ciphers == nil { - c.Ciphers = supportedCiphers - } - var ciphers []string - for _, c := range c.Ciphers { - if cipherModes[c] != nil { - // reject the cipher if we have no cipherModes definition - ciphers = append(ciphers, c) - } - } - c.Ciphers = ciphers - - if c.KeyExchanges == nil { - c.KeyExchanges = supportedKexAlgos - } - - if c.MACs == nil { - c.MACs = supportedMACs - } - - if c.RekeyThreshold == 0 { - // RFC 4253, section 9 suggests rekeying after 1G. - c.RekeyThreshold = 1 << 30 - } - if c.RekeyThreshold < minRekeyThreshold { - c.RekeyThreshold = minRekeyThreshold - } -} - -// buildDataSignedForAuth returns the data that is signed in order to prove -// possession of a private key. See RFC 4252, section 7. -func buildDataSignedForAuth(sessionId []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte { - data := struct { - Session []byte - Type byte - User string - Service string - Method string - Sign bool - Algo []byte - PubKey []byte - }{ - sessionId, - msgUserAuthRequest, - req.User, - req.Service, - req.Method, - true, - algo, - pubKey, - } - return Marshal(data) -} - -func appendU16(buf []byte, n uint16) []byte { - return append(buf, byte(n>>8), byte(n)) -} - -func appendU32(buf []byte, n uint32) []byte { - return append(buf, byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendU64(buf []byte, n uint64) []byte { - return append(buf, - byte(n>>56), byte(n>>48), byte(n>>40), byte(n>>32), - byte(n>>24), byte(n>>16), byte(n>>8), byte(n)) -} - -func appendInt(buf []byte, n int) []byte { - return appendU32(buf, uint32(n)) -} - -func appendString(buf []byte, s string) []byte { - buf = appendU32(buf, uint32(len(s))) - buf = append(buf, s...) - return buf -} - -func appendBool(buf []byte, b bool) []byte { - if b { - return append(buf, 1) - } - return append(buf, 0) -} - -// newCond is a helper to hide the fact that there is no usable zero -// value for sync.Cond. -func newCond() *sync.Cond { return sync.NewCond(new(sync.Mutex)) } - -// window represents the buffer available to clients -// wishing to write to a channel. -type window struct { - *sync.Cond - win uint32 // RFC 4254 5.2 says the window size can grow to 2^32-1 - writeWaiters int - closed bool -} - -// add adds win to the amount of window available -// for consumers. -func (w *window) add(win uint32) bool { - // a zero sized window adjust is a noop. - if win == 0 { - return true - } - w.L.Lock() - if w.win+win < win { - w.L.Unlock() - return false - } - w.win += win - // It is unusual that multiple goroutines would be attempting to reserve - // window space, but not guaranteed. Use broadcast to notify all waiters - // that additional window is available. - w.Broadcast() - w.L.Unlock() - return true -} - -// close sets the window to closed, so all reservations fail -// immediately. -func (w *window) close() { - w.L.Lock() - w.closed = true - w.Broadcast() - w.L.Unlock() -} - -// reserve reserves win from the available window capacity. -// If no capacity remains, reserve will block. reserve may -// return less than requested. -func (w *window) reserve(win uint32) (uint32, error) { - var err error - w.L.Lock() - w.writeWaiters++ - w.Broadcast() - for w.win == 0 && !w.closed { - w.Wait() - } - w.writeWaiters-- - if w.win < win { - win = w.win - } - w.win -= win - if w.closed { - err = io.EOF - } - w.L.Unlock() - return win, err -} - -// waitWriterBlocked waits until some goroutine is blocked for further -// writes. 
It is used in tests only. -func (w *window) waitWriterBlocked() { - w.Cond.L.Lock() - for w.writeWaiters == 0 { - w.Cond.Wait() - } - w.Cond.L.Unlock() -} diff --git a/vendor/golang.org/x/crypto/ssh/connection.go b/vendor/golang.org/x/crypto/ssh/connection.go deleted file mode 100644 index e786f2f9a..000000000 --- a/vendor/golang.org/x/crypto/ssh/connection.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "fmt" - "net" -) - -// OpenChannelError is returned if the other side rejects an -// OpenChannel request. -type OpenChannelError struct { - Reason RejectionReason - Message string -} - -func (e *OpenChannelError) Error() string { - return fmt.Sprintf("ssh: rejected: %s (%s)", e.Reason, e.Message) -} - -// ConnMetadata holds metadata for the connection. -type ConnMetadata interface { - // User returns the user ID for this connection. - User() string - - // SessionID returns the sesson hash, also denoted by H. - SessionID() []byte - - // ClientVersion returns the client's version string as hashed - // into the session ID. - ClientVersion() []byte - - // ServerVersion returns the server's version string as hashed - // into the session ID. - ServerVersion() []byte - - // RemoteAddr returns the remote address for this connection. - RemoteAddr() net.Addr - - // LocalAddr returns the local address for this connection. - LocalAddr() net.Addr -} - -// Conn represents an SSH connection for both server and client roles. -// Conn is the basis for implementing an application layer, such -// as ClientConn, which implements the traditional shell access for -// clients. -type Conn interface { - ConnMetadata - - // SendRequest sends a global request, and returns the - // reply. If wantReply is true, it returns the response status - // and payload. See also RFC4254, section 4. - SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) - - // OpenChannel tries to open an channel. If the request is - // rejected, it returns *OpenChannelError. On success it returns - // the SSH Channel and a Go channel for incoming, out-of-band - // requests. The Go channel must be serviced, or the - // connection will hang. - OpenChannel(name string, data []byte) (Channel, <-chan *Request, error) - - // Close closes the underlying network connection - Close() error - - // Wait blocks until the connection has shut down, and returns the - // error causing the shutdown. - Wait() error - - // TODO(hanwen): consider exposing: - // RequestKeyChange - // Disconnect -} - -// DiscardRequests consumes and rejects all requests from the -// passed-in channel. -func DiscardRequests(in <-chan *Request) { - for req := range in { - if req.WantReply { - req.Reply(false, nil) - } - } -} - -// A connection represents an incoming connection. -type connection struct { - transport *handshakeTransport - sshConn - - // The connection protocol. - *mux -} - -func (c *connection) Close() error { - return c.sshConn.conn.Close() -} - -// sshconn provides net.Conn metadata, but disallows direct reads and -// writes. 
-type sshConn struct { - conn net.Conn - - user string - sessionID []byte - clientVersion []byte - serverVersion []byte -} - -func dup(src []byte) []byte { - dst := make([]byte, len(src)) - copy(dst, src) - return dst -} - -func (c *sshConn) User() string { - return c.user -} - -func (c *sshConn) RemoteAddr() net.Addr { - return c.conn.RemoteAddr() -} - -func (c *sshConn) Close() error { - return c.conn.Close() -} - -func (c *sshConn) LocalAddr() net.Addr { - return c.conn.LocalAddr() -} - -func (c *sshConn) SessionID() []byte { - return dup(c.sessionID) -} - -func (c *sshConn) ClientVersion() []byte { - return dup(c.clientVersion) -} - -func (c *sshConn) ServerVersion() []byte { - return dup(c.serverVersion) -} diff --git a/vendor/golang.org/x/crypto/ssh/doc.go b/vendor/golang.org/x/crypto/ssh/doc.go deleted file mode 100644 index d6be89466..000000000 --- a/vendor/golang.org/x/crypto/ssh/doc.go +++ /dev/null @@ -1,18 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -/* -Package ssh implements an SSH client and server. - -SSH is a transport security protocol, an authentication protocol and a -family of application protocols. The most typical application level -protocol is a remote shell and this is specifically implemented. However, -the multiplexed nature of SSH is exposed to users that wish to support -others. - -References: - [PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD - [SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1 -*/ -package ssh // import "golang.org/x/crypto/ssh" diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go deleted file mode 100644 index 37d42e47f..000000000 --- a/vendor/golang.org/x/crypto/ssh/handshake.go +++ /dev/null @@ -1,460 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto/rand" - "errors" - "fmt" - "io" - "log" - "net" - "sync" -) - -// debugHandshake, if set, prints messages sent and received. Key -// exchange messages are printed as if DH were used, so the debug -// messages are wrong when using ECDH. -const debugHandshake = false - -// keyingTransport is a packet based transport that supports key -// changes. It need not be thread-safe. It should pass through -// msgNewKeys in both directions. -type keyingTransport interface { - packetConn - - // prepareKeyChange sets up a key change. The key change for a - // direction will be effected if a msgNewKeys message is sent - // or received. - prepareKeyChange(*algorithms, *kexResult) error -} - -// handshakeTransport implements rekeying on top of a keyingTransport -// and offers a thread-safe writePacket() interface. -type handshakeTransport struct { - conn keyingTransport - config *Config - - serverVersion []byte - clientVersion []byte - - // hostKeys is non-empty if we are the server. In that case, - // it contains all host keys that can be used to sign the - // connection. - hostKeys []Signer - - // hostKeyAlgorithms is non-empty if we are the client. In that case, - // we accept these key types from the server as host key. - hostKeyAlgorithms []string - - // On read error, incoming is closed, and readError is set. 
- incoming chan []byte - readError error - - // data for host key checking - hostKeyCallback func(hostname string, remote net.Addr, key PublicKey) error - dialAddress string - remoteAddr net.Addr - - readSinceKex uint64 - - // Protects the writing side of the connection - mu sync.Mutex - cond *sync.Cond - sentInitPacket []byte - sentInitMsg *kexInitMsg - writtenSinceKex uint64 - writeError error - - // The session ID or nil if first kex did not complete yet. - sessionID []byte -} - -func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion, serverVersion []byte) *handshakeTransport { - t := &handshakeTransport{ - conn: conn, - serverVersion: serverVersion, - clientVersion: clientVersion, - incoming: make(chan []byte, 16), - config: config, - } - t.cond = sync.NewCond(&t.mu) - return t -} - -func newClientTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ClientConfig, dialAddr string, addr net.Addr) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.dialAddress = dialAddr - t.remoteAddr = addr - t.hostKeyCallback = config.HostKeyCallback - if config.HostKeyAlgorithms != nil { - t.hostKeyAlgorithms = config.HostKeyAlgorithms - } else { - t.hostKeyAlgorithms = supportedHostKeyAlgos - } - go t.readLoop() - return t -} - -func newServerTransport(conn keyingTransport, clientVersion, serverVersion []byte, config *ServerConfig) *handshakeTransport { - t := newHandshakeTransport(conn, &config.Config, clientVersion, serverVersion) - t.hostKeys = config.hostKeys - go t.readLoop() - return t -} - -func (t *handshakeTransport) getSessionID() []byte { - return t.sessionID -} - -func (t *handshakeTransport) id() string { - if len(t.hostKeys) > 0 { - return "server" - } - return "client" -} - -func (t *handshakeTransport) readPacket() ([]byte, error) { - p, ok := <-t.incoming - if !ok { - return nil, t.readError - } - return p, nil -} - -func (t *handshakeTransport) readLoop() { - for { - p, err := t.readOnePacket() - if err != nil { - t.readError = err - close(t.incoming) - break - } - if p[0] == msgIgnore || p[0] == msgDebug { - continue - } - t.incoming <- p - } - - // If we can't read, declare the writing part dead too. - t.mu.Lock() - defer t.mu.Unlock() - if t.writeError == nil { - t.writeError = t.readError - } - t.cond.Broadcast() -} - -func (t *handshakeTransport) readOnePacket() ([]byte, error) { - if t.readSinceKex > t.config.RekeyThreshold { - if err := t.requestKeyChange(); err != nil { - return nil, err - } - } - - p, err := t.conn.readPacket() - if err != nil { - return nil, err - } - - t.readSinceKex += uint64(len(p)) - if debugHandshake { - if p[0] == msgChannelData || p[0] == msgChannelExtendedData { - log.Printf("%s got data (packet %d bytes)", t.id(), len(p)) - } else { - msg, err := decode(p) - log.Printf("%s got %T %v (%v)", t.id(), msg, msg, err) - } - } - if p[0] != msgKexInit { - return p, nil - } - - t.mu.Lock() - - firstKex := t.sessionID == nil - - err = t.enterKeyExchangeLocked(p) - if err != nil { - // drop connection - t.conn.Close() - t.writeError = err - } - - if debugHandshake { - log.Printf("%s exited key exchange (first %v), err %v", t.id(), firstKex, err) - } - - // Unblock writers. - t.sentInitMsg = nil - t.sentInitPacket = nil - t.cond.Broadcast() - t.writtenSinceKex = 0 - t.mu.Unlock() - - if err != nil { - return nil, err - } - - t.readSinceKex = 0 - - // By default, a key exchange is hidden from higher layers by - // translating it into msgIgnore. 
- successPacket := []byte{msgIgnore} - if firstKex { - // sendKexInit() for the first kex waits for - // msgNewKeys so the authentication process is - // guaranteed to happen over an encrypted transport. - successPacket = []byte{msgNewKeys} - } - - return successPacket, nil -} - -// keyChangeCategory describes whether a key exchange is the first on a -// connection, or a subsequent one. -type keyChangeCategory bool - -const ( - firstKeyExchange keyChangeCategory = true - subsequentKeyExchange keyChangeCategory = false -) - -// sendKexInit sends a key change message, and returns the message -// that was sent. After initiating the key change, all writes will be -// blocked until the change is done, and a failed key change will -// close the underlying transport. This function is safe for -// concurrent use by multiple goroutines. -func (t *handshakeTransport) sendKexInit(isFirst keyChangeCategory) error { - var err error - - t.mu.Lock() - // If this is the initial key change, but we already have a sessionID, - // then do nothing because the key exchange has already completed - // asynchronously. - if !isFirst || t.sessionID == nil { - _, _, err = t.sendKexInitLocked(isFirst) - } - t.mu.Unlock() - if err != nil { - return err - } - if isFirst { - if packet, err := t.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - } - return nil -} - -func (t *handshakeTransport) requestInitialKeyChange() error { - return t.sendKexInit(firstKeyExchange) -} - -func (t *handshakeTransport) requestKeyChange() error { - return t.sendKexInit(subsequentKeyExchange) -} - -// sendKexInitLocked sends a key change message. t.mu must be locked -// while this happens. -func (t *handshakeTransport) sendKexInitLocked(isFirst keyChangeCategory) (*kexInitMsg, []byte, error) { - // kexInits may be sent either in response to the other side, - // or because our side wants to initiate a key change, so we - // may have already sent a kexInit. In that case, don't send a - // second kexInit. - if t.sentInitMsg != nil { - return t.sentInitMsg, t.sentInitPacket, nil - } - - msg := &kexInitMsg{ - KexAlgos: t.config.KeyExchanges, - CiphersClientServer: t.config.Ciphers, - CiphersServerClient: t.config.Ciphers, - MACsClientServer: t.config.MACs, - MACsServerClient: t.config.MACs, - CompressionClientServer: supportedCompressions, - CompressionServerClient: supportedCompressions, - } - io.ReadFull(rand.Reader, msg.Cookie[:]) - - if len(t.hostKeys) > 0 { - for _, k := range t.hostKeys { - msg.ServerHostKeyAlgos = append( - msg.ServerHostKeyAlgos, k.PublicKey().Type()) - } - } else { - msg.ServerHostKeyAlgos = t.hostKeyAlgorithms - } - packet := Marshal(msg) - - // writePacket destroys the contents, so save a copy. 
- packetCopy := make([]byte, len(packet)) - copy(packetCopy, packet) - - if err := t.conn.writePacket(packetCopy); err != nil { - return nil, nil, err - } - - t.sentInitMsg = msg - t.sentInitPacket = packet - return msg, packet, nil -} - -func (t *handshakeTransport) writePacket(p []byte) error { - t.mu.Lock() - defer t.mu.Unlock() - - if t.writtenSinceKex > t.config.RekeyThreshold { - t.sendKexInitLocked(subsequentKeyExchange) - } - for t.sentInitMsg != nil && t.writeError == nil { - t.cond.Wait() - } - if t.writeError != nil { - return t.writeError - } - t.writtenSinceKex += uint64(len(p)) - - switch p[0] { - case msgKexInit: - return errors.New("ssh: only handshakeTransport can send kexInit") - case msgNewKeys: - return errors.New("ssh: only handshakeTransport can send newKeys") - default: - return t.conn.writePacket(p) - } -} - -func (t *handshakeTransport) Close() error { - return t.conn.Close() -} - -// enterKeyExchange runs the key exchange. t.mu must be held while running this. -func (t *handshakeTransport) enterKeyExchangeLocked(otherInitPacket []byte) error { - if debugHandshake { - log.Printf("%s entered key exchange", t.id()) - } - myInit, myInitPacket, err := t.sendKexInitLocked(subsequentKeyExchange) - if err != nil { - return err - } - - otherInit := &kexInitMsg{} - if err := Unmarshal(otherInitPacket, otherInit); err != nil { - return err - } - - magics := handshakeMagics{ - clientVersion: t.clientVersion, - serverVersion: t.serverVersion, - clientKexInit: otherInitPacket, - serverKexInit: myInitPacket, - } - - clientInit := otherInit - serverInit := myInit - if len(t.hostKeys) == 0 { - clientInit = myInit - serverInit = otherInit - - magics.clientKexInit = myInitPacket - magics.serverKexInit = otherInitPacket - } - - algs, err := findAgreedAlgorithms(clientInit, serverInit) - if err != nil { - return err - } - - // We don't send FirstKexFollows, but we handle receiving it. - // - // RFC 4253 section 7 defines the kex and the agreement method for - // first_kex_packet_follows. It states that the guessed packet - // should be ignored if the "kex algorithm and/or the host - // key algorithm is guessed wrong (server and client have - // different preferred algorithm), or if any of the other - // algorithms cannot be agreed upon". The other algorithms have - // already been checked above so the kex algorithm and host key - // algorithm are checked here. - if otherInit.FirstKexFollows && (clientInit.KexAlgos[0] != serverInit.KexAlgos[0] || clientInit.ServerHostKeyAlgos[0] != serverInit.ServerHostKeyAlgos[0]) { - // other side sent a kex message for the wrong algorithm, - // which we have to ignore. 
- if _, err := t.conn.readPacket(); err != nil { - return err - } - } - - kex, ok := kexAlgoMap[algs.kex] - if !ok { - return fmt.Errorf("ssh: unexpected key exchange algorithm %v", algs.kex) - } - - var result *kexResult - if len(t.hostKeys) > 0 { - result, err = t.server(kex, algs, &magics) - } else { - result, err = t.client(kex, algs, &magics) - } - - if err != nil { - return err - } - - if t.sessionID == nil { - t.sessionID = result.H - } - result.SessionID = t.sessionID - - t.conn.prepareKeyChange(algs, result) - if err = t.conn.writePacket([]byte{msgNewKeys}); err != nil { - return err - } - if packet, err := t.conn.readPacket(); err != nil { - return err - } else if packet[0] != msgNewKeys { - return unexpectedMessageError(msgNewKeys, packet[0]) - } - - return nil -} - -func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - var hostKey Signer - for _, k := range t.hostKeys { - if algs.hostKey == k.PublicKey().Type() { - hostKey = k - } - } - - r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey) - return r, err -} - -func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) { - result, err := kex.Client(t.conn, t.config.Rand, magics) - if err != nil { - return nil, err - } - - hostKey, err := ParsePublicKey(result.HostKey) - if err != nil { - return nil, err - } - - if err := verifyHostKeySignature(hostKey, result); err != nil { - return nil, err - } - - if t.hostKeyCallback != nil { - err = t.hostKeyCallback(t.dialAddress, t.remoteAddr, hostKey) - if err != nil { - return nil, err - } - } - - return result, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/kex.go b/vendor/golang.org/x/crypto/ssh/kex.go deleted file mode 100644 index c87fbebfd..000000000 --- a/vendor/golang.org/x/crypto/ssh/kex.go +++ /dev/null @@ -1,540 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/subtle" - "errors" - "io" - "math/big" - - "golang.org/x/crypto/curve25519" -) - -const ( - kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1" - kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1" - kexAlgoECDH256 = "ecdh-sha2-nistp256" - kexAlgoECDH384 = "ecdh-sha2-nistp384" - kexAlgoECDH521 = "ecdh-sha2-nistp521" - kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org" -) - -// kexResult captures the outcome of a key exchange. -type kexResult struct { - // Session hash. See also RFC 4253, section 8. - H []byte - - // Shared secret. See also RFC 4253, section 8. - K []byte - - // Host key as hashed into H. - HostKey []byte - - // Signature of H. - Signature []byte - - // A cryptographic hash function that matches the security - // level of the key exchange algorithm. It is used for - // calculating H, and for deriving keys from H and K. - Hash crypto.Hash - - // The session ID, which is the first H computed. This is used - // to derive key material inside the transport. - SessionID []byte -} - -// handshakeMagics contains data that is always included in the -// session hash. 
-type handshakeMagics struct { - clientVersion, serverVersion []byte - clientKexInit, serverKexInit []byte -} - -func (m *handshakeMagics) write(w io.Writer) { - writeString(w, m.clientVersion) - writeString(w, m.serverVersion) - writeString(w, m.clientKexInit) - writeString(w, m.serverKexInit) -} - -// kexAlgorithm abstracts different key exchange algorithms. -type kexAlgorithm interface { - // Server runs server-side key agreement, signing the result - // with a hostkey. - Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error) - - // Client runs the client-side key agreement. Caller is - // responsible for verifying the host key signature. - Client(p packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) -} - -// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement. -type dhGroup struct { - g, p, pMinus1 *big.Int -} - -func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) { - if theirPublic.Cmp(bigOne) <= 0 || theirPublic.Cmp(group.pMinus1) >= 0 { - return nil, errors.New("ssh: DH parameter out of bounds") - } - return new(big.Int).Exp(theirPublic, myPrivate, group.p), nil -} - -func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) { - hashFunc := crypto.SHA1 - - var x *big.Int - for { - var err error - if x, err = rand.Int(randSource, group.pMinus1); err != nil { - return nil, err - } - if x.Sign() > 0 { - break - } - } - - X := new(big.Int).Exp(group.g, x, group.p) - kexDHInit := kexDHInitMsg{ - X: X, - } - if err := c.writePacket(Marshal(&kexDHInit)); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexDHReply kexDHReplyMsg - if err = Unmarshal(packet, &kexDHReply); err != nil { - return nil, err - } - - kInt, err := group.diffieHellman(kexDHReply.Y, x) - if err != nil { - return nil, err - } - - h := hashFunc.New() - magics.write(h) - writeString(h, kexDHReply.HostKey) - writeInt(h, X) - writeInt(h, kexDHReply.Y) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: kexDHReply.HostKey, - Signature: kexDHReply.Signature, - Hash: crypto.SHA1, - }, nil -} - -func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - hashFunc := crypto.SHA1 - packet, err := c.readPacket() - if err != nil { - return - } - var kexDHInit kexDHInitMsg - if err = Unmarshal(packet, &kexDHInit); err != nil { - return - } - - var y *big.Int - for { - if y, err = rand.Int(randSource, group.pMinus1); err != nil { - return - } - if y.Sign() > 0 { - break - } - } - - Y := new(big.Int).Exp(group.g, y, group.p) - kInt, err := group.diffieHellman(kexDHInit.X, y) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := hashFunc.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeInt(h, kexDHInit.X) - writeInt(h, Y) - - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. 
- sig, err := signAndMarshal(priv, randSource, H) - if err != nil { - return nil, err - } - - kexDHReply := kexDHReplyMsg{ - HostKey: hostKeyBytes, - Y: Y, - Signature: sig, - } - packet = Marshal(&kexDHReply) - - err = c.writePacket(packet) - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA1, - }, nil -} - -// ecdh performs Elliptic Curve Diffie-Hellman key exchange as -// described in RFC 5656, section 4. -type ecdh struct { - curve elliptic.Curve -} - -func (kex *ecdh) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - kexInit := kexECDHInitMsg{ - ClientPubKey: elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y), - } - - serialized := Marshal(&kexInit) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - - x, y, err := unmarshalECKey(kex.curve, reply.EphemeralPubKey) - if err != nil { - return nil, err - } - - // generate shared secret - secret, _ := kex.curve.ScalarMult(x, y, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kexInit.ClientPubKey) - writeString(h, reply.EphemeralPubKey) - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: ecHash(kex.curve), - }, nil -} - -// unmarshalECKey parses and checks an EC key. -func unmarshalECKey(curve elliptic.Curve, pubkey []byte) (x, y *big.Int, err error) { - x, y = elliptic.Unmarshal(curve, pubkey) - if x == nil { - return nil, nil, errors.New("ssh: elliptic.Unmarshal failure") - } - if !validateECPublicKey(curve, x, y) { - return nil, nil, errors.New("ssh: public key not on curve") - } - return x, y, nil -} - -// validateECPublicKey checks that the point is a valid public key for -// the given curve. See [SEC1], 3.2.2 -func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool { - if x.Sign() == 0 && y.Sign() == 0 { - return false - } - - if x.Cmp(curve.Params().P) >= 0 { - return false - } - - if y.Cmp(curve.Params().P) >= 0 { - return false - } - - if !curve.IsOnCurve(x, y) { - return false - } - - // We don't check if N * PubKey == 0, since - // - // - the NIST curves have cofactor = 1, so this is implicit. - // (We don't foresee an implementation that supports non NIST - // curves) - // - // - for ephemeral keys, we don't need to worry about small - // subgroup attacks. - return true -} - -func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var kexECDHInit kexECDHInitMsg - if err = Unmarshal(packet, &kexECDHInit); err != nil { - return nil, err - } - - clientX, clientY, err := unmarshalECKey(kex.curve, kexECDHInit.ClientPubKey) - if err != nil { - return nil, err - } - - // We could cache this key across multiple users/multiple - // connection attempts, but the benefit is small. OpenSSH - // generates a new key for each incoming connection. 
- ephKey, err := ecdsa.GenerateKey(kex.curve, rand) - if err != nil { - return nil, err - } - - hostKeyBytes := priv.PublicKey().Marshal() - - serializedEphKey := elliptic.Marshal(kex.curve, ephKey.PublicKey.X, ephKey.PublicKey.Y) - - // generate shared secret - secret, _ := kex.curve.ScalarMult(clientX, clientY, ephKey.D.Bytes()) - - h := ecHash(kex.curve).New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexECDHInit.ClientPubKey) - writeString(h, serializedEphKey) - - K := make([]byte, intLength(secret)) - marshalInt(K, secret) - h.Write(K) - - H := h.Sum(nil) - - // H is already a hash, but the hostkey signing will apply its - // own key-specific hash algorithm. - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: serializedEphKey, - HostKey: hostKeyBytes, - Signature: sig, - } - - serialized := Marshal(&reply) - if err := c.writePacket(serialized); err != nil { - return nil, err - } - - return &kexResult{ - H: H, - K: K, - HostKey: reply.HostKey, - Signature: sig, - Hash: ecHash(kex.curve), - }, nil -} - -var kexAlgoMap = map[string]kexAlgorithm{} - -func init() { - // This is the group called diffie-hellman-group1-sha1 in RFC - // 4253 and Oakley Group 2 in RFC 2409. - p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16) - kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - // This is the group called diffie-hellman-group14-sha1 in RFC - // 4253 and Oakley Group 14 in RFC 3526. - p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16) - - kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{ - g: new(big.Int).SetInt64(2), - p: p, - pMinus1: new(big.Int).Sub(p, bigOne), - } - - kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()} - kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()} - kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()} - kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{} -} - -// curve25519sha256 implements the curve25519-sha256@libssh.org key -// agreement protocol, as described in -// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt -type curve25519sha256 struct{} - -type curve25519KeyPair struct { - priv [32]byte - pub [32]byte -} - -func (kp *curve25519KeyPair) generate(rand io.Reader) error { - if _, err := io.ReadFull(rand, kp.priv[:]); err != nil { - return err - } - curve25519.ScalarBaseMult(&kp.pub, &kp.priv) - return nil -} - -// curve25519Zeros is just an array of 32 zero bytes so that we have something -// convenient to compare against in order to reject curve25519 points with the -// wrong order. 
-var curve25519Zeros [32]byte - -func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handshakeMagics) (*kexResult, error) { - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - if err := c.writePacket(Marshal(&kexECDHInitMsg{kp.pub[:]})); err != nil { - return nil, err - } - - packet, err := c.readPacket() - if err != nil { - return nil, err - } - - var reply kexECDHReplyMsg - if err = Unmarshal(packet, &reply); err != nil { - return nil, err - } - if len(reply.EphemeralPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var servPub, secret [32]byte - copy(servPub[:], reply.EphemeralPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &servPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, reply.HostKey) - writeString(h, kp.pub[:]) - writeString(h, reply.EphemeralPubKey) - - kInt := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - return &kexResult{ - H: h.Sum(nil), - K: K, - HostKey: reply.HostKey, - Signature: reply.Signature, - Hash: crypto.SHA256, - }, nil -} - -func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) { - packet, err := c.readPacket() - if err != nil { - return - } - var kexInit kexECDHInitMsg - if err = Unmarshal(packet, &kexInit); err != nil { - return - } - - if len(kexInit.ClientPubKey) != 32 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong length") - } - - var kp curve25519KeyPair - if err := kp.generate(rand); err != nil { - return nil, err - } - - var clientPub, secret [32]byte - copy(clientPub[:], kexInit.ClientPubKey) - curve25519.ScalarMult(&secret, &kp.priv, &clientPub) - if subtle.ConstantTimeCompare(secret[:], curve25519Zeros[:]) == 1 { - return nil, errors.New("ssh: peer's curve25519 public value has wrong order") - } - - hostKeyBytes := priv.PublicKey().Marshal() - - h := crypto.SHA256.New() - magics.write(h) - writeString(h, hostKeyBytes) - writeString(h, kexInit.ClientPubKey) - writeString(h, kp.pub[:]) - - kInt := new(big.Int).SetBytes(secret[:]) - K := make([]byte, intLength(kInt)) - marshalInt(K, kInt) - h.Write(K) - - H := h.Sum(nil) - - sig, err := signAndMarshal(priv, rand, H) - if err != nil { - return nil, err - } - - reply := kexECDHReplyMsg{ - EphemeralPubKey: kp.pub[:], - HostKey: hostKeyBytes, - Signature: sig, - } - if err := c.writePacket(Marshal(&reply)); err != nil { - return nil, err - } - return &kexResult{ - H: H, - K: K, - HostKey: hostKeyBytes, - Signature: sig, - Hash: crypto.SHA256, - }, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/keys.go b/vendor/golang.org/x/crypto/ssh/keys.go deleted file mode 100644 index f2fc9b6c9..000000000 --- a/vendor/golang.org/x/crypto/ssh/keys.go +++ /dev/null @@ -1,880 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "bytes" - "crypto" - "crypto/dsa" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/asn1" - "encoding/base64" - "encoding/pem" - "errors" - "fmt" - "io" - "math/big" - "strings" - - "golang.org/x/crypto/ed25519" -) - -// These constants represent the algorithm names for key types supported by this -// package. -const ( - KeyAlgoRSA = "ssh-rsa" - KeyAlgoDSA = "ssh-dss" - KeyAlgoECDSA256 = "ecdsa-sha2-nistp256" - KeyAlgoECDSA384 = "ecdsa-sha2-nistp384" - KeyAlgoECDSA521 = "ecdsa-sha2-nistp521" - KeyAlgoED25519 = "ssh-ed25519" -) - -// parsePubKey parses a public key of the given algorithm. -// Use ParsePublicKey for keys with prepended algorithm. -func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err error) { - switch algo { - case KeyAlgoRSA: - return parseRSA(in) - case KeyAlgoDSA: - return parseDSA(in) - case KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521: - return parseECDSA(in) - case KeyAlgoED25519: - return parseED25519(in) - case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: - cert, err := parseCert(in, certToPrivAlgo(algo)) - if err != nil { - return nil, nil, err - } - return cert, nil, nil - } - return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo) -} - -// parseAuthorizedKey parses a public key in OpenSSH authorized_keys format -// (see sshd(8) manual page) once the options and key type fields have been -// removed. -func parseAuthorizedKey(in []byte) (out PublicKey, comment string, err error) { - in = bytes.TrimSpace(in) - - i := bytes.IndexAny(in, " \t") - if i == -1 { - i = len(in) - } - base64Key := in[:i] - - key := make([]byte, base64.StdEncoding.DecodedLen(len(base64Key))) - n, err := base64.StdEncoding.Decode(key, base64Key) - if err != nil { - return nil, "", err - } - key = key[:n] - out, err = ParsePublicKey(key) - if err != nil { - return nil, "", err - } - comment = string(bytes.TrimSpace(in[i:])) - return out, comment, nil -} - -// ParseKnownHosts parses an entry in the format of the known_hosts file. -// -// The known_hosts format is documented in the sshd(8) manual page. This -// function will parse a single entry from in. On successful return, marker -// will contain the optional marker value (i.e. "cert-authority" or "revoked") -// or else be empty, hosts will contain the hosts that this entry matches, -// pubKey will contain the public key and comment will contain any trailing -// comment at the end of the line. See the sshd(8) manual page for the various -// forms that a host string can take. -// -// The unparsed remainder of the input will be returned in rest. This function -// can be called repeatedly to parse multiple entries. -// -// If no entries were found in the input then err will be io.EOF. Otherwise a -// non-nil err value indicates a parse error. -func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey, comment string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - // Strip out the beginning of the known_host key. - // This is either an optional marker or a (set of) hostname(s). 
- keyFields := bytes.Fields(in) - if len(keyFields) < 3 || len(keyFields) > 5 { - return "", nil, nil, "", nil, errors.New("ssh: invalid entry in known_hosts data") - } - - // keyFields[0] is either "@cert-authority", "@revoked" or a comma separated - // list of hosts - marker := "" - if keyFields[0][0] == '@' { - marker = string(keyFields[0][1:]) - keyFields = keyFields[1:] - } - - hosts := string(keyFields[0]) - // keyFields[1] contains the key type (e.g. “ssh-rsa”). - // However, that information is duplicated inside the - // base64-encoded key and so is ignored here. - - key := bytes.Join(keyFields[2:], []byte(" ")) - if pubKey, comment, err = parseAuthorizedKey(key); err != nil { - return "", nil, nil, "", nil, err - } - - return marker, strings.Split(hosts, ","), pubKey, comment, rest, nil - } - - return "", nil, nil, "", nil, io.EOF -} - -// ParseAuthorizedKeys parses a public key from an authorized_keys -// file used in OpenSSH according to the sshd(8) manual page. -func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) { - for len(in) > 0 { - end := bytes.IndexByte(in, '\n') - if end != -1 { - rest = in[end+1:] - in = in[:end] - } else { - rest = nil - } - - end = bytes.IndexByte(in, '\r') - if end != -1 { - in = in[:end] - } - - in = bytes.TrimSpace(in) - if len(in) == 0 || in[0] == '#' { - in = rest - continue - } - - i := bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - return out, comment, options, rest, nil - } - - // No key type recognised. Maybe there's an options field at - // the beginning. - var b byte - inQuote := false - var candidateOptions []string - optionStart := 0 - for i, b = range in { - isEnd := !inQuote && (b == ' ' || b == '\t') - if (b == ',' && !inQuote) || isEnd { - if i-optionStart > 0 { - candidateOptions = append(candidateOptions, string(in[optionStart:i])) - } - optionStart = i + 1 - } - if isEnd { - break - } - if b == '"' && (i == 0 || (i > 0 && in[i-1] != '\\')) { - inQuote = !inQuote - } - } - for i < len(in) && (in[i] == ' ' || in[i] == '\t') { - i++ - } - if i == len(in) { - // Invalid line: unmatched quote - in = rest - continue - } - - in = in[i:] - i = bytes.IndexAny(in, " \t") - if i == -1 { - in = rest - continue - } - - if out, comment, err = parseAuthorizedKey(in[i:]); err == nil { - options = candidateOptions - return out, comment, options, rest, nil - } - - in = rest - continue - } - - return nil, "", nil, nil, errors.New("ssh: no key found") -} - -// ParsePublicKey parses an SSH public key formatted for use in -// the SSH wire protocol according to RFC 4253, section 6.6. -func ParsePublicKey(in []byte) (out PublicKey, err error) { - algo, in, ok := parseString(in) - if !ok { - return nil, errShortRead - } - var rest []byte - out, rest, err = parsePubKey(in, string(algo)) - if len(rest) > 0 { - return nil, errors.New("ssh: trailing junk in public key") - } - - return out, err -} - -// MarshalAuthorizedKey serializes key for inclusion in an OpenSSH -// authorized_keys file. The return value ends with newline. -func MarshalAuthorizedKey(key PublicKey) []byte { - b := &bytes.Buffer{} - b.WriteString(key.Type()) - b.WriteByte(' ') - e := base64.NewEncoder(base64.StdEncoding, b) - e.Write(key.Marshal()) - e.Close() - b.WriteByte('\n') - return b.Bytes() -} - -// PublicKey is an abstraction of different types of public keys. -type PublicKey interface { - // Type returns the key's type, e.g. "ssh-rsa". 
- Type() string - - // Marshal returns the serialized key data in SSH wire format, - // with the name prefix. - Marshal() []byte - - // Verify that sig is a signature on the given data using this - // key. This function will hash the data appropriately first. - Verify(data []byte, sig *Signature) error -} - -// CryptoPublicKey, if implemented by a PublicKey, -// returns the underlying crypto.PublicKey form of the key. -type CryptoPublicKey interface { - CryptoPublicKey() crypto.PublicKey -} - -// A Signer can create signatures that verify against a public key. -type Signer interface { - // PublicKey returns an associated PublicKey instance. - PublicKey() PublicKey - - // Sign returns raw signature for the given data. This method - // will apply the hash specified for the keytype to the data. - Sign(rand io.Reader, data []byte) (*Signature, error) -} - -type rsaPublicKey rsa.PublicKey - -func (r *rsaPublicKey) Type() string { - return "ssh-rsa" -} - -// parseRSA parses an RSA key according to RFC 4253, section 6.6. -func parseRSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - E *big.Int - N *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - if w.E.BitLen() > 24 { - return nil, nil, errors.New("ssh: exponent too large") - } - e := w.E.Int64() - if e < 3 || e&1 == 0 { - return nil, nil, errors.New("ssh: incorrect exponent") - } - - var key rsa.PublicKey - key.E = int(e) - key.N = w.N - return (*rsaPublicKey)(&key), w.Rest, nil -} - -func (r *rsaPublicKey) Marshal() []byte { - e := new(big.Int).SetInt64(int64(r.E)) - // RSA publickey struct layout should match the struct used by - // parseRSACert in the x/crypto/ssh/agent package. - wirekey := struct { - Name string - E *big.Int - N *big.Int - }{ - KeyAlgoRSA, - e, - r.N, - } - return Marshal(&wirekey) -} - -func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != r.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - return rsa.VerifyPKCS1v15((*rsa.PublicKey)(r), crypto.SHA1, digest, sig.Blob) -} - -func (r *rsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*rsa.PublicKey)(r) -} - -type dsaPublicKey dsa.PublicKey - -func (r *dsaPublicKey) Type() string { - return "ssh-dss" -} - -// parseDSA parses an DSA key according to RFC 4253, section 6.6. -func parseDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - P, Q, G, Y *big.Int - Rest []byte `ssh:"rest"` - } - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := &dsaPublicKey{ - Parameters: dsa.Parameters{ - P: w.P, - Q: w.Q, - G: w.G, - }, - Y: w.Y, - } - return key, w.Rest, nil -} - -func (k *dsaPublicKey) Marshal() []byte { - // DSA publickey struct layout should match the struct used by - // parseDSACert in the x/crypto/ssh/agent package. 
- w := struct { - Name string - P, Q, G, Y *big.Int - }{ - k.Type(), - k.P, - k.Q, - k.G, - k.Y, - } - - return Marshal(&w) -} - -func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != k.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type()) - } - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 4253, section 6.6, - // The value for 'dss_signature_blob' is encoded as a string containing - // r, followed by s (which are 160-bit integers, without lengths or - // padding, unsigned, and in network byte order). - // For DSS purposes, sig.Blob should be exactly 40 bytes in length. - if len(sig.Blob) != 40 { - return errors.New("ssh: DSA signature parse error") - } - r := new(big.Int).SetBytes(sig.Blob[:20]) - s := new(big.Int).SetBytes(sig.Blob[20:]) - if dsa.Verify((*dsa.PublicKey)(k), digest, r, s) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *dsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*dsa.PublicKey)(k) -} - -type dsaPrivateKey struct { - *dsa.PrivateKey -} - -func (k *dsaPrivateKey) PublicKey() PublicKey { - return (*dsaPublicKey)(&k.PrivateKey.PublicKey) -} - -func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) { - h := crypto.SHA1.New() - h.Write(data) - digest := h.Sum(nil) - r, s, err := dsa.Sign(rand, k.PrivateKey, digest) - if err != nil { - return nil, err - } - - sig := make([]byte, 40) - rb := r.Bytes() - sb := s.Bytes() - - copy(sig[20-len(rb):20], rb) - copy(sig[40-len(sb):], sb) - - return &Signature{ - Format: k.PublicKey().Type(), - Blob: sig, - }, nil -} - -type ecdsaPublicKey ecdsa.PublicKey - -func (key *ecdsaPublicKey) Type() string { - return "ecdsa-sha2-" + key.nistID() -} - -func (key *ecdsaPublicKey) nistID() string { - switch key.Params().BitSize { - case 256: - return "nistp256" - case 384: - return "nistp384" - case 521: - return "nistp521" - } - panic("ssh: unsupported ecdsa key size") -} - -type ed25519PublicKey ed25519.PublicKey - -func (key ed25519PublicKey) Type() string { - return KeyAlgoED25519 -} - -func parseED25519(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := ed25519.PublicKey(w.KeyBytes) - - return (ed25519PublicKey)(key), w.Rest, nil -} - -func (key ed25519PublicKey) Marshal() []byte { - w := struct { - Name string - KeyBytes []byte - }{ - KeyAlgoED25519, - []byte(key), - } - return Marshal(&w) -} - -func (key ed25519PublicKey) Verify(b []byte, sig *Signature) error { - if sig.Format != key.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) - } - - edKey := (ed25519.PublicKey)(key) - if ok := ed25519.Verify(edKey, b, sig.Blob); !ok { - return errors.New("ssh: signature did not verify") - } - - return nil -} - -func (k ed25519PublicKey) CryptoPublicKey() crypto.PublicKey { - return ed25519.PublicKey(k) -} - -func supportedEllipticCurve(curve elliptic.Curve) bool { - return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521() -} - -// ecHash returns the hash to match the given elliptic curve, see RFC -// 5656, section 6.2.1 -func ecHash(curve elliptic.Curve) crypto.Hash { - bitSize := curve.Params().BitSize - switch { - case bitSize <= 256: - return crypto.SHA256 - case bitSize <= 384: - return crypto.SHA384 - } - return crypto.SHA512 -} - -// 
parseECDSA parses an ECDSA key according to RFC 5656, section 3.1. -func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) { - var w struct { - Curve string - KeyBytes []byte - Rest []byte `ssh:"rest"` - } - - if err := Unmarshal(in, &w); err != nil { - return nil, nil, err - } - - key := new(ecdsa.PublicKey) - - switch w.Curve { - case "nistp256": - key.Curve = elliptic.P256() - case "nistp384": - key.Curve = elliptic.P384() - case "nistp521": - key.Curve = elliptic.P521() - default: - return nil, nil, errors.New("ssh: unsupported curve") - } - - key.X, key.Y = elliptic.Unmarshal(key.Curve, w.KeyBytes) - if key.X == nil || key.Y == nil { - return nil, nil, errors.New("ssh: invalid curve point") - } - return (*ecdsaPublicKey)(key), w.Rest, nil -} - -func (key *ecdsaPublicKey) Marshal() []byte { - // See RFC 5656, section 3.1. - keyBytes := elliptic.Marshal(key.Curve, key.X, key.Y) - // ECDSA publickey struct layout should match the struct used by - // parseECDSACert in the x/crypto/ssh/agent package. - w := struct { - Name string - ID string - Key []byte - }{ - key.Type(), - key.nistID(), - keyBytes, - } - - return Marshal(&w) -} - -func (key *ecdsaPublicKey) Verify(data []byte, sig *Signature) error { - if sig.Format != key.Type() { - return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, key.Type()) - } - - h := ecHash(key.Curve).New() - h.Write(data) - digest := h.Sum(nil) - - // Per RFC 5656, section 3.1.2, - // The ecdsa_signature_blob value has the following specific encoding: - // mpint r - // mpint s - var ecSig struct { - R *big.Int - S *big.Int - } - - if err := Unmarshal(sig.Blob, &ecSig); err != nil { - return err - } - - if ecdsa.Verify((*ecdsa.PublicKey)(key), digest, ecSig.R, ecSig.S) { - return nil - } - return errors.New("ssh: signature did not verify") -} - -func (k *ecdsaPublicKey) CryptoPublicKey() crypto.PublicKey { - return (*ecdsa.PublicKey)(k) -} - -// NewSignerFromKey takes an *rsa.PrivateKey, *dsa.PrivateKey, -// *ecdsa.PrivateKey or any other crypto.Signer and returns a corresponding -// Signer instance. ECDSA keys must use P-256, P-384 or P-521. -func NewSignerFromKey(key interface{}) (Signer, error) { - switch key := key.(type) { - case crypto.Signer: - return NewSignerFromSigner(key) - case *dsa.PrivateKey: - return &dsaPrivateKey{key}, nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -type wrappedSigner struct { - signer crypto.Signer - pubKey PublicKey -} - -// NewSignerFromSigner takes any crypto.Signer implementation and -// returns a corresponding Signer interface. This can be used, for -// example, with keys kept in hardware modules. 
-func NewSignerFromSigner(signer crypto.Signer) (Signer, error) { - pubKey, err := NewPublicKey(signer.Public()) - if err != nil { - return nil, err - } - - return &wrappedSigner{signer, pubKey}, nil -} - -func (s *wrappedSigner) PublicKey() PublicKey { - return s.pubKey -} - -func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) { - var hashFunc crypto.Hash - - switch key := s.pubKey.(type) { - case *rsaPublicKey, *dsaPublicKey: - hashFunc = crypto.SHA1 - case *ecdsaPublicKey: - hashFunc = ecHash(key.Curve) - case ed25519PublicKey: - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } - - var digest []byte - if hashFunc != 0 { - h := hashFunc.New() - h.Write(data) - digest = h.Sum(nil) - } else { - digest = data - } - - signature, err := s.signer.Sign(rand, digest, hashFunc) - if err != nil { - return nil, err - } - - // crypto.Signer.Sign is expected to return an ASN.1-encoded signature - // for ECDSA and DSA, but that's not the encoding expected by SSH, so - // re-encode. - switch s.pubKey.(type) { - case *ecdsaPublicKey, *dsaPublicKey: - type asn1Signature struct { - R, S *big.Int - } - asn1Sig := new(asn1Signature) - _, err := asn1.Unmarshal(signature, asn1Sig) - if err != nil { - return nil, err - } - - switch s.pubKey.(type) { - case *ecdsaPublicKey: - signature = Marshal(asn1Sig) - - case *dsaPublicKey: - signature = make([]byte, 40) - r := asn1Sig.R.Bytes() - s := asn1Sig.S.Bytes() - copy(signature[20-len(r):20], r) - copy(signature[40-len(s):40], s) - } - } - - return &Signature{ - Format: s.pubKey.Type(), - Blob: signature, - }, nil -} - -// NewPublicKey takes an *rsa.PublicKey, *dsa.PublicKey, *ecdsa.PublicKey, -// or ed25519.PublicKey returns a corresponding PublicKey instance. -// ECDSA keys must use P-256, P-384 or P-521. -func NewPublicKey(key interface{}) (PublicKey, error) { - switch key := key.(type) { - case *rsa.PublicKey: - return (*rsaPublicKey)(key), nil - case *ecdsa.PublicKey: - if !supportedEllipticCurve(key.Curve) { - return nil, errors.New("ssh: only P-256, P-384 and P-521 EC keys are supported.") - } - return (*ecdsaPublicKey)(key), nil - case *dsa.PublicKey: - return (*dsaPublicKey)(key), nil - case ed25519.PublicKey: - return (ed25519PublicKey)(key), nil - default: - return nil, fmt.Errorf("ssh: unsupported key type %T", key) - } -} - -// ParsePrivateKey returns a Signer from a PEM encoded private key. It supports -// the same keys as ParseRawPrivateKey. -func ParsePrivateKey(pemBytes []byte) (Signer, error) { - key, err := ParseRawPrivateKey(pemBytes) - if err != nil { - return nil, err - } - - return NewSignerFromKey(key) -} - -// encryptedBlock tells whether a private key is -// encrypted by examining its Proc-Type header -// for a mention of ENCRYPTED -// according to RFC 1421 Section 4.6.1.1. -func encryptedBlock(block *pem.Block) bool { - return strings.Contains(block.Headers["Proc-Type"], "ENCRYPTED") -} - -// ParseRawPrivateKey returns a private key from a PEM encoded private key. It -// supports RSA (PKCS#1), DSA (OpenSSL), and ECDSA private keys. 
-func ParseRawPrivateKey(pemBytes []byte) (interface{}, error) { - block, _ := pem.Decode(pemBytes) - if block == nil { - return nil, errors.New("ssh: no key found") - } - - if encryptedBlock(block) { - return nil, errors.New("ssh: cannot decode encrypted private keys") - } - - switch block.Type { - case "RSA PRIVATE KEY": - return x509.ParsePKCS1PrivateKey(block.Bytes) - case "EC PRIVATE KEY": - return x509.ParseECPrivateKey(block.Bytes) - case "DSA PRIVATE KEY": - return ParseDSAPrivateKey(block.Bytes) - case "OPENSSH PRIVATE KEY": - return parseOpenSSHPrivateKey(block.Bytes) - default: - return nil, fmt.Errorf("ssh: unsupported key type %q", block.Type) - } -} - -// ParseDSAPrivateKey returns a DSA private key from its ASN.1 DER encoding, as -// specified by the OpenSSL DSA man page. -func ParseDSAPrivateKey(der []byte) (*dsa.PrivateKey, error) { - var k struct { - Version int - P *big.Int - Q *big.Int - G *big.Int - Priv *big.Int - Pub *big.Int - } - rest, err := asn1.Unmarshal(der, &k) - if err != nil { - return nil, errors.New("ssh: failed to parse DSA key: " + err.Error()) - } - if len(rest) > 0 { - return nil, errors.New("ssh: garbage after DSA key") - } - - return &dsa.PrivateKey{ - PublicKey: dsa.PublicKey{ - Parameters: dsa.Parameters{ - P: k.P, - Q: k.Q, - G: k.G, - }, - Y: k.Priv, - }, - X: k.Pub, - }, nil -} - -// Implemented based on the documentation at -// https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key -func parseOpenSSHPrivateKey(key []byte) (*ed25519.PrivateKey, error) { - magic := append([]byte("openssh-key-v1"), 0) - if !bytes.Equal(magic, key[0:len(magic)]) { - return nil, errors.New("ssh: invalid openssh private key format") - } - remaining := key[len(magic):] - - var w struct { - CipherName string - KdfName string - KdfOpts string - NumKeys uint32 - PubKey []byte - PrivKeyBlock []byte - } - - if err := Unmarshal(remaining, &w); err != nil { - return nil, err - } - - pk1 := struct { - Check1 uint32 - Check2 uint32 - Keytype string - Pub []byte - Priv []byte - Comment string - Pad []byte `ssh:"rest"` - }{} - - if err := Unmarshal(w.PrivKeyBlock, &pk1); err != nil { - return nil, err - } - - if pk1.Check1 != pk1.Check2 { - return nil, errors.New("ssh: checkint mismatch") - } - - // we only handle ed25519 keys currently - if pk1.Keytype != KeyAlgoED25519 { - return nil, errors.New("ssh: unhandled key type") - } - - for i, b := range pk1.Pad { - if int(b) != i+1 { - return nil, errors.New("ssh: padding not as expected") - } - } - - if len(pk1.Priv) != ed25519.PrivateKeySize { - return nil, errors.New("ssh: private key unexpected length") - } - - pk := ed25519.PrivateKey(make([]byte, ed25519.PrivateKeySize)) - copy(pk, pk1.Priv) - return &pk, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/mac.go b/vendor/golang.org/x/crypto/ssh/mac.go deleted file mode 100644 index 07744ad67..000000000 --- a/vendor/golang.org/x/crypto/ssh/mac.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Message authentication support - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "hash" -) - -type macMode struct { - keySize int - new func(key []byte) hash.Hash -} - -// truncatingMAC wraps around a hash.Hash and truncates the output digest to -// a given size. 
-type truncatingMAC struct { - length int - hmac hash.Hash -} - -func (t truncatingMAC) Write(data []byte) (int, error) { - return t.hmac.Write(data) -} - -func (t truncatingMAC) Sum(in []byte) []byte { - out := t.hmac.Sum(in) - return out[:len(in)+t.length] -} - -func (t truncatingMAC) Reset() { - t.hmac.Reset() -} - -func (t truncatingMAC) Size() int { - return t.length -} - -func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() } - -var macModes = map[string]*macMode{ - "hmac-sha2-256": {32, func(key []byte) hash.Hash { - return hmac.New(sha256.New, key) - }}, - "hmac-sha1": {20, func(key []byte) hash.Hash { - return hmac.New(sha1.New, key) - }}, - "hmac-sha1-96": {20, func(key []byte) hash.Hash { - return truncatingMAC{12, hmac.New(sha1.New, key)} - }}, -} diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go deleted file mode 100644 index e6ecd3afa..000000000 --- a/vendor/golang.org/x/crypto/ssh/messages.go +++ /dev/null @@ -1,758 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "math/big" - "reflect" - "strconv" - "strings" -) - -// These are SSH message type numbers. They are scattered around several -// documents but many were taken from [SSH-PARAMETERS]. -const ( - msgIgnore = 2 - msgUnimplemented = 3 - msgDebug = 4 - msgNewKeys = 21 - - // Standard authentication messages - msgUserAuthSuccess = 52 - msgUserAuthBanner = 53 -) - -// SSH messages: -// -// These structures mirror the wire format of the corresponding SSH messages. -// They are marshaled using reflection with the marshal and unmarshal functions -// in this file. The only wrinkle is that a final member of type []byte with a -// ssh tag of "rest" receives the remainder of a packet when unmarshaling. - -// See RFC 4253, section 11.1. -const msgDisconnect = 1 - -// disconnectMsg is the message that signals a disconnect. It is also -// the error type returned from mux.Wait() -type disconnectMsg struct { - Reason uint32 `sshtype:"1"` - Message string - Language string -} - -func (d *disconnectMsg) Error() string { - return fmt.Sprintf("ssh: disconnect, reason %d: %s", d.Reason, d.Message) -} - -// See RFC 4253, section 7.1. -const msgKexInit = 20 - -type kexInitMsg struct { - Cookie [16]byte `sshtype:"20"` - KexAlgos []string - ServerHostKeyAlgos []string - CiphersClientServer []string - CiphersServerClient []string - MACsClientServer []string - MACsServerClient []string - CompressionClientServer []string - CompressionServerClient []string - LanguagesClientServer []string - LanguagesServerClient []string - FirstKexFollows bool - Reserved uint32 -} - -// See RFC 4253, section 8. - -// Diffie-Helman -const msgKexDHInit = 30 - -type kexDHInitMsg struct { - X *big.Int `sshtype:"30"` -} - -const msgKexECDHInit = 30 - -type kexECDHInitMsg struct { - ClientPubKey []byte `sshtype:"30"` -} - -const msgKexECDHReply = 31 - -type kexECDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - EphemeralPubKey []byte - Signature []byte -} - -const msgKexDHReply = 31 - -type kexDHReplyMsg struct { - HostKey []byte `sshtype:"31"` - Y *big.Int - Signature []byte -} - -// See RFC 4253, section 10. -const msgServiceRequest = 5 - -type serviceRequestMsg struct { - Service string `sshtype:"5"` -} - -// See RFC 4253, section 10. 
-const msgServiceAccept = 6 - -type serviceAcceptMsg struct { - Service string `sshtype:"6"` -} - -// See RFC 4252, section 5. -const msgUserAuthRequest = 50 - -type userAuthRequestMsg struct { - User string `sshtype:"50"` - Service string - Method string - Payload []byte `ssh:"rest"` -} - -// Used for debug printouts of packets. -type userAuthSuccessMsg struct { -} - -// See RFC 4252, section 5.1 -const msgUserAuthFailure = 51 - -type userAuthFailureMsg struct { - Methods []string `sshtype:"51"` - PartialSuccess bool -} - -// See RFC 4256, section 3.2 -const msgUserAuthInfoRequest = 60 -const msgUserAuthInfoResponse = 61 - -type userAuthInfoRequestMsg struct { - User string `sshtype:"60"` - Instruction string - DeprecatedLanguage string - NumPrompts uint32 - Prompts []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpen = 90 - -type channelOpenMsg struct { - ChanType string `sshtype:"90"` - PeersId uint32 - PeersWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -const msgChannelExtendedData = 95 -const msgChannelData = 94 - -// Used for debug print outs of packets. -type channelDataMsg struct { - PeersId uint32 `sshtype:"94"` - Length uint32 - Rest []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenConfirm = 91 - -type channelOpenConfirmMsg struct { - PeersId uint32 `sshtype:"91"` - MyId uint32 - MyWindow uint32 - MaxPacketSize uint32 - TypeSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.1. -const msgChannelOpenFailure = 92 - -type channelOpenFailureMsg struct { - PeersId uint32 `sshtype:"92"` - Reason RejectionReason - Message string - Language string -} - -const msgChannelRequest = 98 - -type channelRequestMsg struct { - PeersId uint32 `sshtype:"98"` - Request string - WantReply bool - RequestSpecificData []byte `ssh:"rest"` -} - -// See RFC 4254, section 5.4. -const msgChannelSuccess = 99 - -type channelRequestSuccessMsg struct { - PeersId uint32 `sshtype:"99"` -} - -// See RFC 4254, section 5.4. -const msgChannelFailure = 100 - -type channelRequestFailureMsg struct { - PeersId uint32 `sshtype:"100"` -} - -// See RFC 4254, section 5.3 -const msgChannelClose = 97 - -type channelCloseMsg struct { - PeersId uint32 `sshtype:"97"` -} - -// See RFC 4254, section 5.3 -const msgChannelEOF = 96 - -type channelEOFMsg struct { - PeersId uint32 `sshtype:"96"` -} - -// See RFC 4254, section 4 -const msgGlobalRequest = 80 - -type globalRequestMsg struct { - Type string `sshtype:"80"` - WantReply bool - Data []byte `ssh:"rest"` -} - -// See RFC 4254, section 4 -const msgRequestSuccess = 81 - -type globalRequestSuccessMsg struct { - Data []byte `ssh:"rest" sshtype:"81"` -} - -// See RFC 4254, section 4 -const msgRequestFailure = 82 - -type globalRequestFailureMsg struct { - Data []byte `ssh:"rest" sshtype:"82"` -} - -// See RFC 4254, section 5.2 -const msgChannelWindowAdjust = 93 - -type windowAdjustMsg struct { - PeersId uint32 `sshtype:"93"` - AdditionalBytes uint32 -} - -// See RFC 4252, section 7 -const msgUserAuthPubKeyOk = 60 - -type userAuthPubKeyOkMsg struct { - Algo string `sshtype:"60"` - PubKey []byte -} - -// typeTags returns the possible type bytes for the given reflect.Type, which -// should be a struct. The possible values are separated by a '|' character. 
-func typeTags(structType reflect.Type) (tags []byte) { - tagStr := structType.Field(0).Tag.Get("sshtype") - - for _, tag := range strings.Split(tagStr, "|") { - i, err := strconv.Atoi(tag) - if err == nil { - tags = append(tags, byte(i)) - } - } - - return tags -} - -func fieldError(t reflect.Type, field int, problem string) error { - if problem != "" { - problem = ": " + problem - } - return fmt.Errorf("ssh: unmarshal error for field %s of type %s%s", t.Field(field).Name, t.Name(), problem) -} - -var errShortRead = errors.New("ssh: short read") - -// Unmarshal parses data in SSH wire format into a structure. The out -// argument should be a pointer to struct. If the first member of the -// struct has the "sshtype" tag set to a '|'-separated set of numbers -// in decimal, the packet must start with one of those numbers. In -// case of error, Unmarshal returns a ParseError or -// UnexpectedMessageError. -func Unmarshal(data []byte, out interface{}) error { - v := reflect.ValueOf(out).Elem() - structType := v.Type() - expectedTypes := typeTags(structType) - - var expectedType byte - if len(expectedTypes) > 0 { - expectedType = expectedTypes[0] - } - - if len(data) == 0 { - return parseError(expectedType) - } - - if len(expectedTypes) > 0 { - goodType := false - for _, e := range expectedTypes { - if e > 0 && data[0] == e { - goodType = true - break - } - } - if !goodType { - return fmt.Errorf("ssh: unexpected message type %d (expected one of %v)", data[0], expectedTypes) - } - data = data[1:] - } - - var ok bool - for i := 0; i < v.NumField(); i++ { - field := v.Field(i) - t := field.Type() - switch t.Kind() { - case reflect.Bool: - if len(data) < 1 { - return errShortRead - } - field.SetBool(data[0] != 0) - data = data[1:] - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - return fieldError(structType, i, "array of unsupported type") - } - if len(data) < t.Len() { - return errShortRead - } - for j, n := 0, t.Len(); j < n; j++ { - field.Index(j).Set(reflect.ValueOf(data[j])) - } - data = data[t.Len():] - case reflect.Uint64: - var u64 uint64 - if u64, data, ok = parseUint64(data); !ok { - return errShortRead - } - field.SetUint(u64) - case reflect.Uint32: - var u32 uint32 - if u32, data, ok = parseUint32(data); !ok { - return errShortRead - } - field.SetUint(uint64(u32)) - case reflect.Uint8: - if len(data) < 1 { - return errShortRead - } - field.SetUint(uint64(data[0])) - data = data[1:] - case reflect.String: - var s []byte - if s, data, ok = parseString(data); !ok { - return fieldError(structType, i, "") - } - field.SetString(string(s)) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if structType.Field(i).Tag.Get("ssh") == "rest" { - field.Set(reflect.ValueOf(data)) - data = nil - } else { - var s []byte - if s, data, ok = parseString(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(s)) - } - case reflect.String: - var nl []string - if nl, data, ok = parseNameList(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(nl)) - default: - return fieldError(structType, i, "slice of unsupported type") - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - if n, data, ok = parseInt(data); !ok { - return errShortRead - } - field.Set(reflect.ValueOf(n)) - } else { - return fieldError(structType, i, "pointer to unsupported type") - } - default: - return fieldError(structType, i, fmt.Sprintf("unsupported type: %v", t)) - } - } - - if len(data) != 0 { - return parseError(expectedType) - } - - return nil -} - -// Marshal 
serializes the message in msg to SSH wire format. The msg -// argument should be a struct or pointer to struct. If the first -// member has the "sshtype" tag set to a number in decimal, that -// number is prepended to the result. If the last of member has the -// "ssh" tag set to "rest", its contents are appended to the output. -func Marshal(msg interface{}) []byte { - out := make([]byte, 0, 64) - return marshalStruct(out, msg) -} - -func marshalStruct(out []byte, msg interface{}) []byte { - v := reflect.Indirect(reflect.ValueOf(msg)) - msgTypes := typeTags(v.Type()) - if len(msgTypes) > 0 { - out = append(out, msgTypes[0]) - } - - for i, n := 0, v.NumField(); i < n; i++ { - field := v.Field(i) - switch t := field.Type(); t.Kind() { - case reflect.Bool: - var v uint8 - if field.Bool() { - v = 1 - } - out = append(out, v) - case reflect.Array: - if t.Elem().Kind() != reflect.Uint8 { - panic(fmt.Sprintf("array of non-uint8 in field %d: %T", i, field.Interface())) - } - for j, l := 0, t.Len(); j < l; j++ { - out = append(out, uint8(field.Index(j).Uint())) - } - case reflect.Uint32: - out = appendU32(out, uint32(field.Uint())) - case reflect.Uint64: - out = appendU64(out, uint64(field.Uint())) - case reflect.Uint8: - out = append(out, uint8(field.Uint())) - case reflect.String: - s := field.String() - out = appendInt(out, len(s)) - out = append(out, s...) - case reflect.Slice: - switch t.Elem().Kind() { - case reflect.Uint8: - if v.Type().Field(i).Tag.Get("ssh") != "rest" { - out = appendInt(out, field.Len()) - } - out = append(out, field.Bytes()...) - case reflect.String: - offset := len(out) - out = appendU32(out, 0) - if n := field.Len(); n > 0 { - for j := 0; j < n; j++ { - f := field.Index(j) - if j != 0 { - out = append(out, ',') - } - out = append(out, f.String()...) 
- } - // overwrite length value - binary.BigEndian.PutUint32(out[offset:], uint32(len(out)-offset-4)) - } - default: - panic(fmt.Sprintf("slice of unknown type in field %d: %T", i, field.Interface())) - } - case reflect.Ptr: - if t == bigIntType { - var n *big.Int - nValue := reflect.ValueOf(&n) - nValue.Elem().Set(field) - needed := intLength(n) - oldLength := len(out) - - if cap(out)-len(out) < needed { - newOut := make([]byte, len(out), 2*(len(out)+needed)) - copy(newOut, out) - out = newOut - } - out = out[:oldLength+needed] - marshalInt(out[oldLength:], n) - } else { - panic(fmt.Sprintf("pointer to unknown type in field %d: %T", i, field.Interface())) - } - } - } - - return out -} - -var bigOne = big.NewInt(1) - -func parseString(in []byte) (out, rest []byte, ok bool) { - if len(in) < 4 { - return - } - length := binary.BigEndian.Uint32(in) - in = in[4:] - if uint32(len(in)) < length { - return - } - out = in[:length] - rest = in[length:] - ok = true - return -} - -var ( - comma = []byte{','} - emptyNameList = []string{} -) - -func parseNameList(in []byte) (out []string, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - if len(contents) == 0 { - out = emptyNameList - return - } - parts := bytes.Split(contents, comma) - out = make([]string, len(parts)) - for i, part := range parts { - out[i] = string(part) - } - return -} - -func parseInt(in []byte) (out *big.Int, rest []byte, ok bool) { - contents, rest, ok := parseString(in) - if !ok { - return - } - out = new(big.Int) - - if len(contents) > 0 && contents[0]&0x80 == 0x80 { - // This is a negative number - notBytes := make([]byte, len(contents)) - for i := range notBytes { - notBytes[i] = ^contents[i] - } - out.SetBytes(notBytes) - out.Add(out, bigOne) - out.Neg(out) - } else { - // Positive number - out.SetBytes(contents) - } - ok = true - return -} - -func parseUint32(in []byte) (uint32, []byte, bool) { - if len(in) < 4 { - return 0, nil, false - } - return binary.BigEndian.Uint32(in), in[4:], true -} - -func parseUint64(in []byte) (uint64, []byte, bool) { - if len(in) < 8 { - return 0, nil, false - } - return binary.BigEndian.Uint64(in), in[8:], true -} - -func intLength(n *big.Int) int { - length := 4 /* length bytes */ - if n.Sign() < 0 { - nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bitLen := nMinus1.BitLen() - if bitLen%8 == 0 { - // The number will need 0xff padding - length++ - } - length += (bitLen + 7) / 8 - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bitLen := n.BitLen() - if bitLen%8 == 0 { - // The number will need 0x00 padding - length++ - } - length += (bitLen + 7) / 8 - } - - return length -} - -func marshalUint32(to []byte, n uint32) []byte { - binary.BigEndian.PutUint32(to, n) - return to[4:] -} - -func marshalUint64(to []byte, n uint64) []byte { - binary.BigEndian.PutUint64(to, n) - return to[8:] -} - -func marshalInt(to []byte, n *big.Int) []byte { - lengthBytes := to - to = to[4:] - length := 0 - - if n.Sign() < 0 { - // A negative number has to be converted to two's-complement - // form. So we'll subtract 1 and invert. If the - // most-significant-bit isn't set then we'll need to pad the - // beginning with 0xff in order to keep the number negative. 
- nMinus1 := new(big.Int).Neg(n) - nMinus1.Sub(nMinus1, bigOne) - bytes := nMinus1.Bytes() - for i := range bytes { - bytes[i] ^= 0xff - } - if len(bytes) == 0 || bytes[0]&0x80 == 0 { - to[0] = 0xff - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } else if n.Sign() == 0 { - // A zero is the zero length string - } else { - bytes := n.Bytes() - if len(bytes) > 0 && bytes[0]&0x80 != 0 { - // We'll have to pad this with a 0x00 in order to - // stop it looking like a negative number. - to[0] = 0 - to = to[1:] - length++ - } - nBytes := copy(to, bytes) - to = to[nBytes:] - length += nBytes - } - - lengthBytes[0] = byte(length >> 24) - lengthBytes[1] = byte(length >> 16) - lengthBytes[2] = byte(length >> 8) - lengthBytes[3] = byte(length) - return to -} - -func writeInt(w io.Writer, n *big.Int) { - length := intLength(n) - buf := make([]byte, length) - marshalInt(buf, n) - w.Write(buf) -} - -func writeString(w io.Writer, s []byte) { - var lengthBytes [4]byte - lengthBytes[0] = byte(len(s) >> 24) - lengthBytes[1] = byte(len(s) >> 16) - lengthBytes[2] = byte(len(s) >> 8) - lengthBytes[3] = byte(len(s)) - w.Write(lengthBytes[:]) - w.Write(s) -} - -func stringLength(n int) int { - return 4 + n -} - -func marshalString(to []byte, s []byte) []byte { - to[0] = byte(len(s) >> 24) - to[1] = byte(len(s) >> 16) - to[2] = byte(len(s) >> 8) - to[3] = byte(len(s)) - to = to[4:] - copy(to, s) - return to[len(s):] -} - -var bigIntType = reflect.TypeOf((*big.Int)(nil)) - -// Decode a packet into its corresponding message. -func decode(packet []byte) (interface{}, error) { - var msg interface{} - switch packet[0] { - case msgDisconnect: - msg = new(disconnectMsg) - case msgServiceRequest: - msg = new(serviceRequestMsg) - case msgServiceAccept: - msg = new(serviceAcceptMsg) - case msgKexInit: - msg = new(kexInitMsg) - case msgKexDHInit: - msg = new(kexDHInitMsg) - case msgKexDHReply: - msg = new(kexDHReplyMsg) - case msgUserAuthRequest: - msg = new(userAuthRequestMsg) - case msgUserAuthSuccess: - return new(userAuthSuccessMsg), nil - case msgUserAuthFailure: - msg = new(userAuthFailureMsg) - case msgUserAuthPubKeyOk: - msg = new(userAuthPubKeyOkMsg) - case msgGlobalRequest: - msg = new(globalRequestMsg) - case msgRequestSuccess: - msg = new(globalRequestSuccessMsg) - case msgRequestFailure: - msg = new(globalRequestFailureMsg) - case msgChannelOpen: - msg = new(channelOpenMsg) - case msgChannelData: - msg = new(channelDataMsg) - case msgChannelOpenConfirm: - msg = new(channelOpenConfirmMsg) - case msgChannelOpenFailure: - msg = new(channelOpenFailureMsg) - case msgChannelWindowAdjust: - msg = new(windowAdjustMsg) - case msgChannelEOF: - msg = new(channelEOFMsg) - case msgChannelClose: - msg = new(channelCloseMsg) - case msgChannelRequest: - msg = new(channelRequestMsg) - case msgChannelSuccess: - msg = new(channelRequestSuccessMsg) - case msgChannelFailure: - msg = new(channelRequestFailureMsg) - default: - return nil, unexpectedMessageError(0, packet[0]) - } - if err := Unmarshal(packet, msg); err != nil { - return nil, err - } - return msg, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/mux.go b/vendor/golang.org/x/crypto/ssh/mux.go deleted file mode 100644 index f3a3ddd78..000000000 --- a/vendor/golang.org/x/crypto/ssh/mux.go +++ /dev/null @@ -1,330 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package ssh - -import ( - "encoding/binary" - "fmt" - "io" - "log" - "sync" - "sync/atomic" -) - -// debugMux, if set, causes messages in the connection protocol to be -// logged. -const debugMux = false - -// chanList is a thread safe channel list. -type chanList struct { - // protects concurrent access to chans - sync.Mutex - - // chans are indexed by the local id of the channel, which the - // other side should send in the PeersId field. - chans []*channel - - // This is a debugging aid: it offsets all IDs by this - // amount. This helps distinguish otherwise identical - // server/client muxes - offset uint32 -} - -// Assigns a channel ID to the given channel. -func (c *chanList) add(ch *channel) uint32 { - c.Lock() - defer c.Unlock() - for i := range c.chans { - if c.chans[i] == nil { - c.chans[i] = ch - return uint32(i) + c.offset - } - } - c.chans = append(c.chans, ch) - return uint32(len(c.chans)-1) + c.offset -} - -// getChan returns the channel for the given ID. -func (c *chanList) getChan(id uint32) *channel { - id -= c.offset - - c.Lock() - defer c.Unlock() - if id < uint32(len(c.chans)) { - return c.chans[id] - } - return nil -} - -func (c *chanList) remove(id uint32) { - id -= c.offset - c.Lock() - if id < uint32(len(c.chans)) { - c.chans[id] = nil - } - c.Unlock() -} - -// dropAll forgets all channels it knows, returning them in a slice. -func (c *chanList) dropAll() []*channel { - c.Lock() - defer c.Unlock() - var r []*channel - - for _, ch := range c.chans { - if ch == nil { - continue - } - r = append(r, ch) - } - c.chans = nil - return r -} - -// mux represents the state for the SSH connection protocol, which -// multiplexes many channels onto a single packet transport. -type mux struct { - conn packetConn - chanList chanList - - incomingChannels chan NewChannel - - globalSentMu sync.Mutex - globalResponses chan interface{} - incomingRequests chan *Request - - errCond *sync.Cond - err error -} - -// When debugging, each new chanList instantiation has a different -// offset. -var globalOff uint32 - -func (m *mux) Wait() error { - m.errCond.L.Lock() - defer m.errCond.L.Unlock() - for m.err == nil { - m.errCond.Wait() - } - return m.err -} - -// newMux returns a mux that runs over the given connection. 
-func newMux(p packetConn) *mux { - m := &mux{ - conn: p, - incomingChannels: make(chan NewChannel, 16), - globalResponses: make(chan interface{}, 1), - incomingRequests: make(chan *Request, 16), - errCond: newCond(), - } - if debugMux { - m.chanList.offset = atomic.AddUint32(&globalOff, 1) - } - - go m.loop() - return m -} - -func (m *mux) sendMessage(msg interface{}) error { - p := Marshal(msg) - if debugMux { - log.Printf("send global(%d): %#v", m.chanList.offset, msg) - } - return m.conn.writePacket(p) -} - -func (m *mux) SendRequest(name string, wantReply bool, payload []byte) (bool, []byte, error) { - if wantReply { - m.globalSentMu.Lock() - defer m.globalSentMu.Unlock() - } - - if err := m.sendMessage(globalRequestMsg{ - Type: name, - WantReply: wantReply, - Data: payload, - }); err != nil { - return false, nil, err - } - - if !wantReply { - return false, nil, nil - } - - msg, ok := <-m.globalResponses - if !ok { - return false, nil, io.EOF - } - switch msg := msg.(type) { - case *globalRequestFailureMsg: - return false, msg.Data, nil - case *globalRequestSuccessMsg: - return true, msg.Data, nil - default: - return false, nil, fmt.Errorf("ssh: unexpected response to request: %#v", msg) - } -} - -// ackRequest must be called after processing a global request that -// has WantReply set. -func (m *mux) ackRequest(ok bool, data []byte) error { - if ok { - return m.sendMessage(globalRequestSuccessMsg{Data: data}) - } - return m.sendMessage(globalRequestFailureMsg{Data: data}) -} - -func (m *mux) Close() error { - return m.conn.Close() -} - -// loop runs the connection machine. It will process packets until an -// error is encountered. To synchronize on loop exit, use mux.Wait. -func (m *mux) loop() { - var err error - for err == nil { - err = m.onePacket() - } - - for _, ch := range m.chanList.dropAll() { - ch.close() - } - - close(m.incomingChannels) - close(m.incomingRequests) - close(m.globalResponses) - - m.conn.Close() - - m.errCond.L.Lock() - m.err = err - m.errCond.Broadcast() - m.errCond.L.Unlock() - - if debugMux { - log.Println("loop exit", err) - } -} - -// onePacket reads and processes one packet. -func (m *mux) onePacket() error { - packet, err := m.conn.readPacket() - if err != nil { - return err - } - - if debugMux { - if packet[0] == msgChannelData || packet[0] == msgChannelExtendedData { - log.Printf("decoding(%d): data packet - %d bytes", m.chanList.offset, len(packet)) - } else { - p, _ := decode(packet) - log.Printf("decoding(%d): %d %#v - %d bytes", m.chanList.offset, packet[0], p, len(packet)) - } - } - - switch packet[0] { - case msgChannelOpen: - return m.handleChannelOpen(packet) - case msgGlobalRequest, msgRequestSuccess, msgRequestFailure: - return m.handleGlobalPacket(packet) - } - - // assume a channel packet. 
- if len(packet) < 5 { - return parseError(packet[0]) - } - id := binary.BigEndian.Uint32(packet[1:]) - ch := m.chanList.getChan(id) - if ch == nil { - return fmt.Errorf("ssh: invalid channel %d", id) - } - - return ch.handlePacket(packet) -} - -func (m *mux) handleGlobalPacket(packet []byte) error { - msg, err := decode(packet) - if err != nil { - return err - } - - switch msg := msg.(type) { - case *globalRequestMsg: - m.incomingRequests <- &Request{ - Type: msg.Type, - WantReply: msg.WantReply, - Payload: msg.Data, - mux: m, - } - case *globalRequestSuccessMsg, *globalRequestFailureMsg: - m.globalResponses <- msg - default: - panic(fmt.Sprintf("not a global message %#v", msg)) - } - - return nil -} - -// handleChannelOpen schedules a channel to be Accept()ed. -func (m *mux) handleChannelOpen(packet []byte) error { - var msg channelOpenMsg - if err := Unmarshal(packet, &msg); err != nil { - return err - } - - if msg.MaxPacketSize < minPacketLength || msg.MaxPacketSize > 1<<31 { - failMsg := channelOpenFailureMsg{ - PeersId: msg.PeersId, - Reason: ConnectionFailed, - Message: "invalid request", - Language: "en_US.UTF-8", - } - return m.sendMessage(failMsg) - } - - c := m.newChannel(msg.ChanType, channelInbound, msg.TypeSpecificData) - c.remoteId = msg.PeersId - c.maxRemotePayload = msg.MaxPacketSize - c.remoteWin.add(msg.PeersWindow) - m.incomingChannels <- c - return nil -} - -func (m *mux) OpenChannel(chanType string, extra []byte) (Channel, <-chan *Request, error) { - ch, err := m.openChannel(chanType, extra) - if err != nil { - return nil, nil, err - } - - return ch, ch.incomingRequests, nil -} - -func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) { - ch := m.newChannel(chanType, channelOutbound, extra) - - ch.maxIncomingPayload = channelMaxPacket - - open := channelOpenMsg{ - ChanType: chanType, - PeersWindow: ch.myWindow, - MaxPacketSize: ch.maxIncomingPayload, - TypeSpecificData: extra, - PeersId: ch.localId, - } - if err := m.sendMessage(open); err != nil { - return nil, err - } - - switch msg := (<-ch.msg).(type) { - case *channelOpenConfirmMsg: - return ch, nil - case *channelOpenFailureMsg: - return nil, &OpenChannelError{msg.Reason, msg.Message} - default: - return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg) - } -} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go deleted file mode 100644 index 37df1b302..000000000 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ /dev/null @@ -1,488 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bytes" - "errors" - "fmt" - "io" - "net" -) - -// The Permissions type holds fine-grained permissions that are -// specific to a user or a specific authentication method for a -// user. Permissions, except for "source-address", must be enforced in -// the server application layer, after successful authentication. The -// Permissions are passed on in ServerConn so a server implementation -// can honor them. -type Permissions struct { - // Critical options restrict default permissions. Common - // restrictions are "source-address" and "force-command". If - // the server cannot enforce the restriction, or does not - // recognize it, the user should not authenticate. 
- CriticalOptions map[string]string - - // Extensions are extra functionality that the server may - // offer on authenticated connections. Common extensions are - // "permit-agent-forwarding", "permit-X11-forwarding". Lack of - // support for an extension does not preclude authenticating a - // user. - Extensions map[string]string -} - -// ServerConfig holds server specific configuration data. -type ServerConfig struct { - // Config contains configuration shared between client and server. - Config - - hostKeys []Signer - - // NoClientAuth is true if clients are allowed to connect without - // authenticating. - NoClientAuth bool - - // PasswordCallback, if non-nil, is called when a user - // attempts to authenticate using a password. - PasswordCallback func(conn ConnMetadata, password []byte) (*Permissions, error) - - // PublicKeyCallback, if non-nil, is called when a client attempts public - // key authentication. It must return true if the given public key is - // valid for the given user. For example, see CertChecker.Authenticate. - PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error) - - // KeyboardInteractiveCallback, if non-nil, is called when - // keyboard-interactive authentication is selected (RFC - // 4256). The client object's Challenge function should be - // used to query the user. The callback may offer multiple - // Challenge rounds. To avoid information leaks, the client - // should be presented a challenge even if the user is - // unknown. - KeyboardInteractiveCallback func(conn ConnMetadata, client KeyboardInteractiveChallenge) (*Permissions, error) - - // AuthLogCallback, if non-nil, is called to log all authentication - // attempts. - AuthLogCallback func(conn ConnMetadata, method string, err error) - - // ServerVersion is the version identification string to announce in - // the public handshake. - // If empty, a reasonable default is used. - // Note that RFC 4253 section 4.2 requires that this string start with - // "SSH-2.0-". - ServerVersion string -} - -// AddHostKey adds a private key as a host key. If an existing host -// key exists with the same algorithm, it is overwritten. Each server -// config must have at least one host key. -func (s *ServerConfig) AddHostKey(key Signer) { - for i, k := range s.hostKeys { - if k.PublicKey().Type() == key.PublicKey().Type() { - s.hostKeys[i] = key - return - } - } - - s.hostKeys = append(s.hostKeys, key) -} - -// cachedPubKey contains the results of querying whether a public key is -// acceptable for a user. -type cachedPubKey struct { - user string - pubKeyData []byte - result error - perms *Permissions -} - -const maxCachedPubKeys = 16 - -// pubKeyCache caches tests for public keys. Since SSH clients -// will query whether a public key is acceptable before attempting to -// authenticate with it, we end up with duplicate queries for public -// key validity. The cache only applies to a single ServerConn. -type pubKeyCache struct { - keys []cachedPubKey -} - -// get returns the result for a given user/algo/key tuple. -func (c *pubKeyCache) get(user string, pubKeyData []byte) (cachedPubKey, bool) { - for _, k := range c.keys { - if k.user == user && bytes.Equal(k.pubKeyData, pubKeyData) { - return k, true - } - } - return cachedPubKey{}, false -} - -// add adds the given tuple to the cache. 
-func (c *pubKeyCache) add(candidate cachedPubKey) { - if len(c.keys) < maxCachedPubKeys { - c.keys = append(c.keys, candidate) - } -} - -// ServerConn is an authenticated SSH connection, as seen from the -// server -type ServerConn struct { - Conn - - // If the succeeding authentication callback returned a - // non-nil Permissions pointer, it is stored here. - Permissions *Permissions -} - -// NewServerConn starts a new SSH server with c as the underlying -// transport. It starts with a handshake and, if the handshake is -// unsuccessful, it closes the connection and returns an error. The -// Request and NewChannel channels must be serviced, or the connection -// will hang. -func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewChannel, <-chan *Request, error) { - fullConf := *config - fullConf.SetDefaults() - s := &connection{ - sshConn: sshConn{conn: c}, - } - perms, err := s.serverHandshake(&fullConf) - if err != nil { - c.Close() - return nil, nil, nil, err - } - return &ServerConn{s, perms}, s.mux.incomingChannels, s.mux.incomingRequests, nil -} - -// signAndMarshal signs the data with the appropriate algorithm, -// and serializes the result in SSH wire format. -func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) { - sig, err := k.Sign(rand, data) - if err != nil { - return nil, err - } - - return Marshal(sig), nil -} - -// handshake performs key exchange and user authentication. -func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) { - if len(config.hostKeys) == 0 { - return nil, errors.New("ssh: server has no host keys") - } - - if !config.NoClientAuth && config.PasswordCallback == nil && config.PublicKeyCallback == nil && config.KeyboardInteractiveCallback == nil { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if config.ServerVersion != "" { - s.serverVersion = []byte(config.ServerVersion) - } else { - s.serverVersion = []byte(packageVersion) - } - var err error - s.clientVersion, err = exchangeVersions(s.sshConn.conn, s.serverVersion) - if err != nil { - return nil, err - } - - tr := newTransport(s.sshConn.conn, config.Rand, false /* not client */) - s.transport = newServerTransport(tr, s.clientVersion, s.serverVersion, config) - - if err := s.transport.requestInitialKeyChange(); err != nil { - return nil, err - } - - // We just did the key change, so the session ID is established. 
- s.sessionID = s.transport.getSessionID() - - var packet []byte - if packet, err = s.transport.readPacket(); err != nil { - return nil, err - } - - var serviceRequest serviceRequestMsg - if err = Unmarshal(packet, &serviceRequest); err != nil { - return nil, err - } - if serviceRequest.Service != serviceUserAuth { - return nil, errors.New("ssh: requested service '" + serviceRequest.Service + "' before authenticating") - } - serviceAccept := serviceAcceptMsg{ - Service: serviceUserAuth, - } - if err := s.transport.writePacket(Marshal(&serviceAccept)); err != nil { - return nil, err - } - - perms, err := s.serverAuthenticate(config) - if err != nil { - return nil, err - } - s.mux = newMux(s.transport) - return perms, err -} - -func isAcceptableAlgo(algo string) bool { - switch algo { - case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: - return true - } - return false -} - -func checkSourceAddress(addr net.Addr, sourceAddr string) error { - if addr == nil { - return errors.New("ssh: no address known for client, but source-address match required") - } - - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - return fmt.Errorf("ssh: remote address %v is not an TCP address when checking source-address match", addr) - } - - if allowedIP := net.ParseIP(sourceAddr); allowedIP != nil { - if bytes.Equal(allowedIP, tcpAddr.IP) { - return nil - } - } else { - _, ipNet, err := net.ParseCIDR(sourceAddr) - if err != nil { - return fmt.Errorf("ssh: error parsing source-address restriction %q: %v", sourceAddr, err) - } - - if ipNet.Contains(tcpAddr.IP) { - return nil - } - } - - return fmt.Errorf("ssh: remote address %v is not allowed because of source-address restriction", addr) -} - -func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { - var err error - var cache pubKeyCache - var perms *Permissions - -userAuthLoop: - for { - var userAuthReq userAuthRequestMsg - if packet, err := s.transport.readPacket(); err != nil { - return nil, err - } else if err = Unmarshal(packet, &userAuthReq); err != nil { - return nil, err - } - - if userAuthReq.Service != serviceSSH { - return nil, errors.New("ssh: client attempted to negotiate for unknown service: " + userAuthReq.Service) - } - - s.user = userAuthReq.User - perms = nil - authErr := errors.New("no auth passed yet") - - switch userAuthReq.Method { - case "none": - if config.NoClientAuth { - authErr = nil - } - case "password": - if config.PasswordCallback == nil { - authErr = errors.New("ssh: password auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 || payload[0] != 0 { - return nil, parseError(msgUserAuthRequest) - } - payload = payload[1:] - password, payload, ok := parseString(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - perms, authErr = config.PasswordCallback(s, password) - case "keyboard-interactive": - if config.KeyboardInteractiveCallback == nil { - authErr = errors.New("ssh: keyboard-interactive auth not configubred") - break - } - - prompter := &sshClientKeyboardInteractive{s} - perms, authErr = config.KeyboardInteractiveCallback(s, prompter.Challenge) - case "publickey": - if config.PublicKeyCallback == nil { - authErr = errors.New("ssh: publickey auth not configured") - break - } - payload := userAuthReq.Payload - if len(payload) < 1 { - return nil, parseError(msgUserAuthRequest) - } - isQuery := 
payload[0] == 0 - payload = payload[1:] - algoBytes, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - algo := string(algoBytes) - if !isAcceptableAlgo(algo) { - authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo) - break - } - - pubKeyData, payload, ok := parseString(payload) - if !ok { - return nil, parseError(msgUserAuthRequest) - } - - pubKey, err := ParsePublicKey(pubKeyData) - if err != nil { - return nil, err - } - - candidate, ok := cache.get(s.user, pubKeyData) - if !ok { - candidate.user = s.user - candidate.pubKeyData = pubKeyData - candidate.perms, candidate.result = config.PublicKeyCallback(s, pubKey) - if candidate.result == nil && candidate.perms != nil && candidate.perms.CriticalOptions != nil && candidate.perms.CriticalOptions[sourceAddressCriticalOption] != "" { - candidate.result = checkSourceAddress( - s.RemoteAddr(), - candidate.perms.CriticalOptions[sourceAddressCriticalOption]) - } - cache.add(candidate) - } - - if isQuery { - // The client can query if the given public key - // would be okay. - if len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - - if candidate.result == nil { - okMsg := userAuthPubKeyOkMsg{ - Algo: algo, - PubKey: pubKeyData, - } - if err = s.transport.writePacket(Marshal(&okMsg)); err != nil { - return nil, err - } - continue userAuthLoop - } - authErr = candidate.result - } else { - sig, payload, ok := parseSignature(payload) - if !ok || len(payload) > 0 { - return nil, parseError(msgUserAuthRequest) - } - // Ensure the public key algo and signature algo - // are supported. Compare the private key - // algorithm name that corresponds to algo with - // sig.Format. This is usually the same, but - // for certs, the names differ. - if !isAcceptableAlgo(sig.Format) { - break - } - signedData := buildDataSignedForAuth(s.transport.getSessionID(), userAuthReq, algoBytes, pubKeyData) - - if err := pubKey.Verify(signedData, sig); err != nil { - return nil, err - } - - authErr = candidate.result - perms = candidate.perms - } - default: - authErr = fmt.Errorf("ssh: unknown method %q", userAuthReq.Method) - } - - if config.AuthLogCallback != nil { - config.AuthLogCallback(s, userAuthReq.Method, authErr) - } - - if authErr == nil { - break userAuthLoop - } - - var failureMsg userAuthFailureMsg - if config.PasswordCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "password") - } - if config.PublicKeyCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "publickey") - } - if config.KeyboardInteractiveCallback != nil { - failureMsg.Methods = append(failureMsg.Methods, "keyboard-interactive") - } - - if len(failureMsg.Methods) == 0 { - return nil, errors.New("ssh: no authentication methods configured but NoClientAuth is also false") - } - - if err = s.transport.writePacket(Marshal(&failureMsg)); err != nil { - return nil, err - } - } - - if err = s.transport.writePacket([]byte{msgUserAuthSuccess}); err != nil { - return nil, err - } - return perms, nil -} - -// sshClientKeyboardInteractive implements a ClientKeyboardInteractive by -// asking the client on the other side of a ServerConn. 
-type sshClientKeyboardInteractive struct { - *connection -} - -func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) { - if len(questions) != len(echos) { - return nil, errors.New("ssh: echos and questions must have equal length") - } - - var prompts []byte - for i := range questions { - prompts = appendString(prompts, questions[i]) - prompts = appendBool(prompts, echos[i]) - } - - if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{ - Instruction: instruction, - NumPrompts: uint32(len(questions)), - Prompts: prompts, - })); err != nil { - return nil, err - } - - packet, err := c.transport.readPacket() - if err != nil { - return nil, err - } - if packet[0] != msgUserAuthInfoResponse { - return nil, unexpectedMessageError(msgUserAuthInfoResponse, packet[0]) - } - packet = packet[1:] - - n, packet, ok := parseUint32(packet) - if !ok || int(n) != len(questions) { - return nil, parseError(msgUserAuthInfoResponse) - } - - for i := uint32(0); i < n; i++ { - ans, rest, ok := parseString(packet) - if !ok { - return nil, parseError(msgUserAuthInfoResponse) - } - - answers = append(answers, string(ans)) - packet = rest - } - if len(packet) != 0 { - return nil, errors.New("ssh: junk at end of message") - } - - return answers, nil -} diff --git a/vendor/golang.org/x/crypto/ssh/session.go b/vendor/golang.org/x/crypto/ssh/session.go deleted file mode 100644 index 17e2aa85c..000000000 --- a/vendor/golang.org/x/crypto/ssh/session.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -// Session implements an interactive session described in -// "RFC 4254, section 6". - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "sync" -) - -type Signal string - -// POSIX signals as listed in RFC 4254 Section 6.10. -const ( - SIGABRT Signal = "ABRT" - SIGALRM Signal = "ALRM" - SIGFPE Signal = "FPE" - SIGHUP Signal = "HUP" - SIGILL Signal = "ILL" - SIGINT Signal = "INT" - SIGKILL Signal = "KILL" - SIGPIPE Signal = "PIPE" - SIGQUIT Signal = "QUIT" - SIGSEGV Signal = "SEGV" - SIGTERM Signal = "TERM" - SIGUSR1 Signal = "USR1" - SIGUSR2 Signal = "USR2" -) - -var signals = map[Signal]int{ - SIGABRT: 6, - SIGALRM: 14, - SIGFPE: 8, - SIGHUP: 1, - SIGILL: 4, - SIGINT: 2, - SIGKILL: 9, - SIGPIPE: 13, - SIGQUIT: 3, - SIGSEGV: 11, - SIGTERM: 15, -} - -type TerminalModes map[uint8]uint32 - -// POSIX terminal mode flags as listed in RFC 4254 Section 8. -const ( - tty_OP_END = 0 - VINTR = 1 - VQUIT = 2 - VERASE = 3 - VKILL = 4 - VEOF = 5 - VEOL = 6 - VEOL2 = 7 - VSTART = 8 - VSTOP = 9 - VSUSP = 10 - VDSUSP = 11 - VREPRINT = 12 - VWERASE = 13 - VLNEXT = 14 - VFLUSH = 15 - VSWTCH = 16 - VSTATUS = 17 - VDISCARD = 18 - IGNPAR = 30 - PARMRK = 31 - INPCK = 32 - ISTRIP = 33 - INLCR = 34 - IGNCR = 35 - ICRNL = 36 - IUCLC = 37 - IXON = 38 - IXANY = 39 - IXOFF = 40 - IMAXBEL = 41 - ISIG = 50 - ICANON = 51 - XCASE = 52 - ECHO = 53 - ECHOE = 54 - ECHOK = 55 - ECHONL = 56 - NOFLSH = 57 - TOSTOP = 58 - IEXTEN = 59 - ECHOCTL = 60 - ECHOKE = 61 - PENDIN = 62 - OPOST = 70 - OLCUC = 71 - ONLCR = 72 - OCRNL = 73 - ONOCR = 74 - ONLRET = 75 - CS7 = 90 - CS8 = 91 - PARENB = 92 - PARODD = 93 - TTY_OP_ISPEED = 128 - TTY_OP_OSPEED = 129 -) - -// A Session represents a connection to a remote command or shell. 
-type Session struct { - // Stdin specifies the remote process's standard input. - // If Stdin is nil, the remote process reads from an empty - // bytes.Buffer. - Stdin io.Reader - - // Stdout and Stderr specify the remote process's standard - // output and error. - // - // If either is nil, Run connects the corresponding file - // descriptor to an instance of ioutil.Discard. There is a - // fixed amount of buffering that is shared for the two streams. - // If either blocks it may eventually cause the remote - // command to block. - Stdout io.Writer - Stderr io.Writer - - ch Channel // the channel backing this session - started bool // true once Start, Run or Shell is invoked. - copyFuncs []func() error - errors chan error // one send per copyFunc - - // true if pipe method is active - stdinpipe, stdoutpipe, stderrpipe bool - - // stdinPipeWriter is non-nil if StdinPipe has not been called - // and Stdin was specified by the user; it is the write end of - // a pipe connecting Session.Stdin to the stdin channel. - stdinPipeWriter io.WriteCloser - - exitStatus chan error -} - -// SendRequest sends an out-of-band channel request on the SSH channel -// underlying the session. -func (s *Session) SendRequest(name string, wantReply bool, payload []byte) (bool, error) { - return s.ch.SendRequest(name, wantReply, payload) -} - -func (s *Session) Close() error { - return s.ch.Close() -} - -// RFC 4254 Section 6.4. -type setenvRequest struct { - Name string - Value string -} - -// Setenv sets an environment variable that will be applied to any -// command executed by Shell or Run. -func (s *Session) Setenv(name, value string) error { - msg := setenvRequest{ - Name: name, - Value: value, - } - ok, err := s.ch.SendRequest("env", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: setenv failed") - } - return err -} - -// RFC 4254 Section 6.2. -type ptyRequestMsg struct { - Term string - Columns uint32 - Rows uint32 - Width uint32 - Height uint32 - Modelist string -} - -// RequestPty requests the association of a pty with the session on the remote host. -func (s *Session) RequestPty(term string, h, w int, termmodes TerminalModes) error { - var tm []byte - for k, v := range termmodes { - kv := struct { - Key byte - Val uint32 - }{k, v} - - tm = append(tm, Marshal(&kv)...) - } - tm = append(tm, tty_OP_END) - req := ptyRequestMsg{ - Term: term, - Columns: uint32(w), - Rows: uint32(h), - Width: uint32(w * 8), - Height: uint32(h * 8), - Modelist: string(tm), - } - ok, err := s.ch.SendRequest("pty-req", true, Marshal(&req)) - if err == nil && !ok { - err = errors.New("ssh: pty-req failed") - } - return err -} - -// RFC 4254 Section 6.5. -type subsystemRequestMsg struct { - Subsystem string -} - -// RequestSubsystem requests the association of a subsystem with the session on the remote host. -// A subsystem is a predefined command that runs in the background when the ssh session is initiated -func (s *Session) RequestSubsystem(subsystem string) error { - msg := subsystemRequestMsg{ - Subsystem: subsystem, - } - ok, err := s.ch.SendRequest("subsystem", true, Marshal(&msg)) - if err == nil && !ok { - err = errors.New("ssh: subsystem request failed") - } - return err -} - -// RFC 4254 Section 6.9. -type signalMsg struct { - Signal string -} - -// Signal sends the given signal to the remote process. -// sig is one of the SIG* constants. 
-func (s *Session) Signal(sig Signal) error { - msg := signalMsg{ - Signal: string(sig), - } - - _, err := s.ch.SendRequest("signal", false, Marshal(&msg)) - return err -} - -// RFC 4254 Section 6.5. -type execMsg struct { - Command string -} - -// Start runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start or Shell. -func (s *Session) Start(cmd string) error { - if s.started { - return errors.New("ssh: session already started") - } - req := execMsg{ - Command: cmd, - } - - ok, err := s.ch.SendRequest("exec", true, Marshal(&req)) - if err == nil && !ok { - err = fmt.Errorf("ssh: command %v failed", cmd) - } - if err != nil { - return err - } - return s.start() -} - -// Run runs cmd on the remote host. Typically, the remote -// server passes cmd to the shell for interpretation. -// A Session only accepts one call to Run, Start, Shell, Output, -// or CombinedOutput. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Run(cmd string) error { - err := s.Start(cmd) - if err != nil { - return err - } - return s.Wait() -} - -// Output runs cmd on the remote host and returns its standard output. -func (s *Session) Output(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - var b bytes.Buffer - s.Stdout = &b - err := s.Run(cmd) - return b.Bytes(), err -} - -type singleWriter struct { - b bytes.Buffer - mu sync.Mutex -} - -func (w *singleWriter) Write(p []byte) (int, error) { - w.mu.Lock() - defer w.mu.Unlock() - return w.b.Write(p) -} - -// CombinedOutput runs cmd on the remote host and returns its combined -// standard output and standard error. -func (s *Session) CombinedOutput(cmd string) ([]byte, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - var b singleWriter - s.Stdout = &b - s.Stderr = &b - err := s.Run(cmd) - return b.b.Bytes(), err -} - -// Shell starts a login shell on the remote host. A Session only -// accepts one call to Run, Start, Shell, Output, or CombinedOutput. -func (s *Session) Shell() error { - if s.started { - return errors.New("ssh: session already started") - } - - ok, err := s.ch.SendRequest("shell", true, nil) - if err == nil && !ok { - return errors.New("ssh: could not start shell") - } - if err != nil { - return err - } - return s.start() -} - -func (s *Session) start() error { - s.started = true - - type F func(*Session) - for _, setupFd := range []F{(*Session).stdin, (*Session).stdout, (*Session).stderr} { - setupFd(s) - } - - s.errors = make(chan error, len(s.copyFuncs)) - for _, fn := range s.copyFuncs { - go func(fn func() error) { - s.errors <- fn() - }(fn) - } - return nil -} - -// Wait waits for the remote command to exit. -// -// The returned error is nil if the command runs, has no problems -// copying stdin, stdout, and stderr, and exits with a zero exit -// status. -// -// If the remote server does not send an exit status, an error of type -// *ExitMissingError is returned. 
If the command completes -// unsuccessfully or is interrupted by a signal, the error is of type -// *ExitError. Other error types may be returned for I/O problems. -func (s *Session) Wait() error { - if !s.started { - return errors.New("ssh: session not started") - } - waitErr := <-s.exitStatus - - if s.stdinPipeWriter != nil { - s.stdinPipeWriter.Close() - } - var copyError error - for _ = range s.copyFuncs { - if err := <-s.errors; err != nil && copyError == nil { - copyError = err - } - } - if waitErr != nil { - return waitErr - } - return copyError -} - -func (s *Session) wait(reqs <-chan *Request) error { - wm := Waitmsg{status: -1} - // Wait for msg channel to be closed before returning. - for msg := range reqs { - switch msg.Type { - case "exit-status": - wm.status = int(binary.BigEndian.Uint32(msg.Payload)) - case "exit-signal": - var sigval struct { - Signal string - CoreDumped bool - Error string - Lang string - } - if err := Unmarshal(msg.Payload, &sigval); err != nil { - return err - } - - // Must sanitize strings? - wm.signal = sigval.Signal - wm.msg = sigval.Error - wm.lang = sigval.Lang - default: - // This handles keepalives and matches - // OpenSSH's behaviour. - if msg.WantReply { - msg.Reply(false, nil) - } - } - } - if wm.status == 0 { - return nil - } - if wm.status == -1 { - // exit-status was never sent from server - if wm.signal == "" { - // signal was not sent either. RFC 4254 - // section 6.10 recommends against this - // behavior, but it is allowed, so we let - // clients handle it. - return &ExitMissingError{} - } - wm.status = 128 - if _, ok := signals[Signal(wm.signal)]; ok { - wm.status += signals[Signal(wm.signal)] - } - } - - return &ExitError{wm} -} - -// ExitMissingError is returned if a session is torn down cleanly, but -// the server sends no confirmation of the exit status. -type ExitMissingError struct{} - -func (e *ExitMissingError) Error() string { - return "wait: remote command exited without exit status or exit signal" -} - -func (s *Session) stdin() { - if s.stdinpipe { - return - } - var stdin io.Reader - if s.Stdin == nil { - stdin = new(bytes.Buffer) - } else { - r, w := io.Pipe() - go func() { - _, err := io.Copy(w, s.Stdin) - w.CloseWithError(err) - }() - stdin, s.stdinPipeWriter = r, w - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.ch, stdin) - if err1 := s.ch.CloseWrite(); err == nil && err1 != io.EOF { - err = err1 - } - return err - }) -} - -func (s *Session) stdout() { - if s.stdoutpipe { - return - } - if s.Stdout == nil { - s.Stdout = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stdout, s.ch) - return err - }) -} - -func (s *Session) stderr() { - if s.stderrpipe { - return - } - if s.Stderr == nil { - s.Stderr = ioutil.Discard - } - s.copyFuncs = append(s.copyFuncs, func() error { - _, err := io.Copy(s.Stderr, s.ch.Stderr()) - return err - }) -} - -// sessionStdin reroutes Close to CloseWrite. -type sessionStdin struct { - io.Writer - ch Channel -} - -func (s *sessionStdin) Close() error { - return s.ch.CloseWrite() -} - -// StdinPipe returns a pipe that will be connected to the -// remote command's standard input when the command starts. 
-func (s *Session) StdinPipe() (io.WriteCloser, error) { - if s.Stdin != nil { - return nil, errors.New("ssh: Stdin already set") - } - if s.started { - return nil, errors.New("ssh: StdinPipe after process started") - } - s.stdinpipe = true - return &sessionStdin{s.ch, s.ch}, nil -} - -// StdoutPipe returns a pipe that will be connected to the -// remote command's standard output when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StdoutPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StdoutPipe() (io.Reader, error) { - if s.Stdout != nil { - return nil, errors.New("ssh: Stdout already set") - } - if s.started { - return nil, errors.New("ssh: StdoutPipe after process started") - } - s.stdoutpipe = true - return s.ch, nil -} - -// StderrPipe returns a pipe that will be connected to the -// remote command's standard error when the command starts. -// There is a fixed amount of buffering that is shared between -// stdout and stderr streams. If the StderrPipe reader is -// not serviced fast enough it may eventually cause the -// remote command to block. -func (s *Session) StderrPipe() (io.Reader, error) { - if s.Stderr != nil { - return nil, errors.New("ssh: Stderr already set") - } - if s.started { - return nil, errors.New("ssh: StderrPipe after process started") - } - s.stderrpipe = true - return s.ch.Stderr(), nil -} - -// newSession returns a new interactive session on the remote host. -func newSession(ch Channel, reqs <-chan *Request) (*Session, error) { - s := &Session{ - ch: ch, - } - s.exitStatus = make(chan error, 1) - go func() { - s.exitStatus <- s.wait(reqs) - }() - - return s, nil -} - -// An ExitError reports unsuccessful completion of a remote command. -type ExitError struct { - Waitmsg -} - -func (e *ExitError) Error() string { - return e.Waitmsg.String() -} - -// Waitmsg stores the information about an exited remote command -// as reported by Wait. -type Waitmsg struct { - status int - signal string - msg string - lang string -} - -// ExitStatus returns the exit status of the remote command. -func (w Waitmsg) ExitStatus() int { - return w.status -} - -// Signal returns the exit signal of the remote command if -// it was terminated violently. -func (w Waitmsg) Signal() string { - return w.signal -} - -// Msg returns the exit message given by the remote command -func (w Waitmsg) Msg() string { - return w.msg -} - -// Lang returns the language tag. See RFC 3066 -func (w Waitmsg) Lang() string { - return w.lang -} - -func (w Waitmsg) String() string { - str := fmt.Sprintf("Process exited with status %v", w.status) - if w.signal != "" { - str += fmt.Sprintf(" from signal %v", w.signal) - } - if w.msg != "" { - str += fmt.Sprintf(". Reason was: %v", w.msg) - } - return str -} diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go deleted file mode 100644 index 6151241ff..000000000 --- a/vendor/golang.org/x/crypto/ssh/tcpip.go +++ /dev/null @@ -1,407 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "errors" - "fmt" - "io" - "math/rand" - "net" - "strconv" - "strings" - "sync" - "time" -) - -// Listen requests the remote peer open a listening socket on -// addr. Incoming connections will be available by calling Accept on -// the returned net.Listener. 
The listener must be serviced, or the -// SSH connection may hang. -func (c *Client) Listen(n, addr string) (net.Listener, error) { - laddr, err := net.ResolveTCPAddr(n, addr) - if err != nil { - return nil, err - } - return c.ListenTCP(laddr) -} - -// Automatic port allocation is broken with OpenSSH before 6.0. See -// also https://bugzilla.mindrot.org/show_bug.cgi?id=2017. In -// particular, OpenSSH 5.9 sends a channelOpenMsg with port number 0, -// rather than the actual port number. This means you can never open -// two different listeners with auto allocated ports. We work around -// this by trying explicit ports until we succeed. - -const openSSHPrefix = "OpenSSH_" - -var portRandomizer = rand.New(rand.NewSource(time.Now().UnixNano())) - -// isBrokenOpenSSHVersion returns true if the given version string -// specifies a version of OpenSSH that is known to have a bug in port -// forwarding. -func isBrokenOpenSSHVersion(versionStr string) bool { - i := strings.Index(versionStr, openSSHPrefix) - if i < 0 { - return false - } - i += len(openSSHPrefix) - j := i - for ; j < len(versionStr); j++ { - if versionStr[j] < '0' || versionStr[j] > '9' { - break - } - } - version, _ := strconv.Atoi(versionStr[i:j]) - return version < 6 -} - -// autoPortListenWorkaround simulates automatic port allocation by -// trying random ports repeatedly. -func (c *Client) autoPortListenWorkaround(laddr *net.TCPAddr) (net.Listener, error) { - var sshListener net.Listener - var err error - const tries = 10 - for i := 0; i < tries; i++ { - addr := *laddr - addr.Port = 1024 + portRandomizer.Intn(60000) - sshListener, err = c.ListenTCP(&addr) - if err == nil { - laddr.Port = addr.Port - return sshListener, err - } - } - return nil, fmt.Errorf("ssh: listen on random port failed after %d tries: %v", tries, err) -} - -// RFC 4254 7.1 -type channelForwardMsg struct { - addr string - rport uint32 -} - -// ListenTCP requests the remote peer open a listening socket -// on laddr. Incoming connections will be available by calling -// Accept on the returned net.Listener. -func (c *Client) ListenTCP(laddr *net.TCPAddr) (net.Listener, error) { - if laddr.Port == 0 && isBrokenOpenSSHVersion(string(c.ServerVersion())) { - return c.autoPortListenWorkaround(laddr) - } - - m := channelForwardMsg{ - laddr.IP.String(), - uint32(laddr.Port), - } - // send message - ok, resp, err := c.SendRequest("tcpip-forward", true, Marshal(&m)) - if err != nil { - return nil, err - } - if !ok { - return nil, errors.New("ssh: tcpip-forward request denied by peer") - } - - // If the original port was 0, then the remote side will - // supply a real port number in the response. - if laddr.Port == 0 { - var p struct { - Port uint32 - } - if err := Unmarshal(resp, &p); err != nil { - return nil, err - } - laddr.Port = int(p.Port) - } - - // Register this forward, using the port number we obtained. - ch := c.forwards.add(*laddr) - - return &tcpListener{laddr, c, ch}, nil -} - -// forwardList stores a mapping between remote -// forward requests and the tcpListeners. -type forwardList struct { - sync.Mutex - entries []forwardEntry -} - -// forwardEntry represents an established mapping of a laddr on a -// remote ssh server to a channel connected to a tcpListener. -type forwardEntry struct { - laddr net.TCPAddr - c chan forward -} - -// forward represents an incoming forwarded tcpip connection. The -// arguments to add/remove/lookup should be address as specified in -// the original forward-request. 
-type forward struct { - newCh NewChannel // the ssh client channel underlying this forward - raddr *net.TCPAddr // the raddr of the incoming connection -} - -func (l *forwardList) add(addr net.TCPAddr) chan forward { - l.Lock() - defer l.Unlock() - f := forwardEntry{ - addr, - make(chan forward, 1), - } - l.entries = append(l.entries, f) - return f.c -} - -// See RFC 4254, section 7.2 -type forwardedTCPPayload struct { - Addr string - Port uint32 - OriginAddr string - OriginPort uint32 -} - -// parseTCPAddr parses the originating address from the remote into a *net.TCPAddr. -func parseTCPAddr(addr string, port uint32) (*net.TCPAddr, error) { - if port == 0 || port > 65535 { - return nil, fmt.Errorf("ssh: port number out of range: %d", port) - } - ip := net.ParseIP(string(addr)) - if ip == nil { - return nil, fmt.Errorf("ssh: cannot parse IP address %q", addr) - } - return &net.TCPAddr{IP: ip, Port: int(port)}, nil -} - -func (l *forwardList) handleChannels(in <-chan NewChannel) { - for ch := range in { - var payload forwardedTCPPayload - if err := Unmarshal(ch.ExtraData(), &payload); err != nil { - ch.Reject(ConnectionFailed, "could not parse forwarded-tcpip payload: "+err.Error()) - continue - } - - // RFC 4254 section 7.2 specifies that incoming - // addresses should list the address, in string - // format. It is implied that this should be an IP - // address, as it would be impossible to connect to it - // otherwise. - laddr, err := parseTCPAddr(payload.Addr, payload.Port) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - raddr, err := parseTCPAddr(payload.OriginAddr, payload.OriginPort) - if err != nil { - ch.Reject(ConnectionFailed, err.Error()) - continue - } - - if ok := l.forward(*laddr, *raddr, ch); !ok { - // Section 7.2, implementations MUST reject spurious incoming - // connections. - ch.Reject(Prohibited, "no forward for address") - continue - } - } -} - -// remove removes the forward entry, and the channel feeding its -// listener. -func (l *forwardList) remove(addr net.TCPAddr) { - l.Lock() - defer l.Unlock() - for i, f := range l.entries { - if addr.IP.Equal(f.laddr.IP) && addr.Port == f.laddr.Port { - l.entries = append(l.entries[:i], l.entries[i+1:]...) - close(f.c) - return - } - } -} - -// closeAll closes and clears all forwards. -func (l *forwardList) closeAll() { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - close(f.c) - } - l.entries = nil -} - -func (l *forwardList) forward(laddr, raddr net.TCPAddr, ch NewChannel) bool { - l.Lock() - defer l.Unlock() - for _, f := range l.entries { - if laddr.IP.Equal(f.laddr.IP) && laddr.Port == f.laddr.Port { - f.c <- forward{ch, &raddr} - return true - } - } - return false -} - -type tcpListener struct { - laddr *net.TCPAddr - - conn *Client - in <-chan forward -} - -// Accept waits for and returns the next connection to the listener. -func (l *tcpListener) Accept() (net.Conn, error) { - s, ok := <-l.in - if !ok { - return nil, io.EOF - } - ch, incoming, err := s.newCh.Accept() - if err != nil { - return nil, err - } - go DiscardRequests(incoming) - - return &tcpChanConn{ - Channel: ch, - laddr: l.laddr, - raddr: s.raddr, - }, nil -} - -// Close closes the listener. -func (l *tcpListener) Close() error { - m := channelForwardMsg{ - l.laddr.IP.String(), - uint32(l.laddr.Port), - } - - // this also closes the listener. 
- l.conn.forwards.remove(*l.laddr) - ok, _, err := l.conn.SendRequest("cancel-tcpip-forward", true, Marshal(&m)) - if err == nil && !ok { - err = errors.New("ssh: cancel-tcpip-forward failed") - } - return err -} - -// Addr returns the listener's network address. -func (l *tcpListener) Addr() net.Addr { - return l.laddr -} - -// Dial initiates a connection to the addr from the remote host. -// The resulting connection has a zero LocalAddr() and RemoteAddr(). -func (c *Client) Dial(n, addr string) (net.Conn, error) { - // Parse the address into host and numeric port. - host, portString, err := net.SplitHostPort(addr) - if err != nil { - return nil, err - } - port, err := strconv.ParseUint(portString, 10, 16) - if err != nil { - return nil, err - } - // Use a zero address for local and remote address. - zeroAddr := &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - ch, err := c.dial(net.IPv4zero.String(), 0, host, int(port)) - if err != nil { - return nil, err - } - return &tcpChanConn{ - Channel: ch, - laddr: zeroAddr, - raddr: zeroAddr, - }, nil -} - -// DialTCP connects to the remote address raddr on the network net, -// which must be "tcp", "tcp4", or "tcp6". If laddr is not nil, it is used -// as the local address for the connection. -func (c *Client) DialTCP(n string, laddr, raddr *net.TCPAddr) (net.Conn, error) { - if laddr == nil { - laddr = &net.TCPAddr{ - IP: net.IPv4zero, - Port: 0, - } - } - ch, err := c.dial(laddr.IP.String(), laddr.Port, raddr.IP.String(), raddr.Port) - if err != nil { - return nil, err - } - return &tcpChanConn{ - Channel: ch, - laddr: laddr, - raddr: raddr, - }, nil -} - -// RFC 4254 7.2 -type channelOpenDirectMsg struct { - raddr string - rport uint32 - laddr string - lport uint32 -} - -func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel, error) { - msg := channelOpenDirectMsg{ - raddr: raddr, - rport: uint32(rport), - laddr: laddr, - lport: uint32(lport), - } - ch, in, err := c.OpenChannel("direct-tcpip", Marshal(&msg)) - if err != nil { - return nil, err - } - go DiscardRequests(in) - return ch, err -} - -type tcpChan struct { - Channel // the backing channel -} - -// tcpChanConn fulfills the net.Conn interface without -// the tcpChan having to hold laddr or raddr directly. -type tcpChanConn struct { - Channel - laddr, raddr net.Addr -} - -// LocalAddr returns the local network address. -func (t *tcpChanConn) LocalAddr() net.Addr { - return t.laddr -} - -// RemoteAddr returns the remote network address. -func (t *tcpChanConn) RemoteAddr() net.Addr { - return t.raddr -} - -// SetDeadline sets the read and write deadlines associated -// with the connection. -func (t *tcpChanConn) SetDeadline(deadline time.Time) error { - if err := t.SetReadDeadline(deadline); err != nil { - return err - } - return t.SetWriteDeadline(deadline) -} - -// SetReadDeadline sets the read deadline. -// A zero value for t means Read will not time out. -// After the deadline, the error from Read will implement net.Error -// with Timeout() == true. -func (t *tcpChanConn) SetReadDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} - -// SetWriteDeadline exists to satisfy the net.Conn interface -// but is not implemented by this type. It always returns an error. 
-func (t *tcpChanConn) SetWriteDeadline(deadline time.Time) error { - return errors.New("ssh: tcpChan: deadline not supported") -} diff --git a/vendor/golang.org/x/crypto/ssh/transport.go b/vendor/golang.org/x/crypto/ssh/transport.go deleted file mode 100644 index 62fba629e..000000000 --- a/vendor/golang.org/x/crypto/ssh/transport.go +++ /dev/null @@ -1,333 +0,0 @@ -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ssh - -import ( - "bufio" - "errors" - "io" -) - -const ( - gcmCipherID = "aes128-gcm@openssh.com" - aes128cbcID = "aes128-cbc" - tripledescbcID = "3des-cbc" -) - -// packetConn represents a transport that implements packet based -// operations. -type packetConn interface { - // Encrypt and send a packet of data to the remote peer. - writePacket(packet []byte) error - - // Read a packet from the connection - readPacket() ([]byte, error) - - // Close closes the write-side of the connection. - Close() error -} - -// transport is the keyingTransport that implements the SSH packet -// protocol. -type transport struct { - reader connectionState - writer connectionState - - bufReader *bufio.Reader - bufWriter *bufio.Writer - rand io.Reader - - io.Closer -} - -// packetCipher represents a combination of SSH encryption/MAC -// protocol. A single instance should be used for one direction only. -type packetCipher interface { - // writePacket encrypts the packet and writes it to w. The - // contents of the packet are generally scrambled. - writePacket(seqnum uint32, w io.Writer, rand io.Reader, packet []byte) error - - // readPacket reads and decrypts a packet of data. The - // returned packet may be overwritten by future calls of - // readPacket. - readPacket(seqnum uint32, r io.Reader) ([]byte, error) -} - -// connectionState represents one side (read or write) of the -// connection. This is necessary because each direction has its own -// keys, and can even have its own algorithms -type connectionState struct { - packetCipher - seqNum uint32 - dir direction - pendingKeyChange chan packetCipher -} - -// prepareKeyChange sets up key material for a keychange. The key changes in -// both directions are triggered by reading and writing a msgNewKey packet -// respectively. -func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) error { - if ciph, err := newPacketCipher(t.reader.dir, algs.r, kexResult); err != nil { - return err - } else { - t.reader.pendingKeyChange <- ciph - } - - if ciph, err := newPacketCipher(t.writer.dir, algs.w, kexResult); err != nil { - return err - } else { - t.writer.pendingKeyChange <- ciph - } - - return nil -} - -// Read and decrypt next packet. -func (t *transport) readPacket() ([]byte, error) { - return t.reader.readPacket(t.bufReader) -} - -func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) { - packet, err := s.packetCipher.readPacket(s.seqNum, r) - s.seqNum++ - if err == nil && len(packet) == 0 { - err = errors.New("ssh: zero length packet") - } - - if len(packet) > 0 { - switch packet[0] { - case msgNewKeys: - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - return nil, errors.New("ssh: got bogus newkeys message.") - } - - case msgDisconnect: - // Transform a disconnect message into an - // error. Since this is lowest level at which - // we interpret message types, doing it here - // ensures that we don't have to handle it - // elsewhere. 
- var msg disconnectMsg - if err := Unmarshal(packet, &msg); err != nil { - return nil, err - } - return nil, &msg - } - } - - // The packet may point to an internal buffer, so copy the - // packet out here. - fresh := make([]byte, len(packet)) - copy(fresh, packet) - - return fresh, err -} - -func (t *transport) writePacket(packet []byte) error { - return t.writer.writePacket(t.bufWriter, t.rand, packet) -} - -func (s *connectionState) writePacket(w *bufio.Writer, rand io.Reader, packet []byte) error { - changeKeys := len(packet) > 0 && packet[0] == msgNewKeys - - err := s.packetCipher.writePacket(s.seqNum, w, rand, packet) - if err != nil { - return err - } - if err = w.Flush(); err != nil { - return err - } - s.seqNum++ - if changeKeys { - select { - case cipher := <-s.pendingKeyChange: - s.packetCipher = cipher - default: - panic("ssh: no key material for msgNewKeys") - } - } - return err -} - -func newTransport(rwc io.ReadWriteCloser, rand io.Reader, isClient bool) *transport { - t := &transport{ - bufReader: bufio.NewReader(rwc), - bufWriter: bufio.NewWriter(rwc), - rand: rand, - reader: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - writer: connectionState{ - packetCipher: &streamPacketCipher{cipher: noneCipher{}}, - pendingKeyChange: make(chan packetCipher, 1), - }, - Closer: rwc, - } - if isClient { - t.reader.dir = serverKeys - t.writer.dir = clientKeys - } else { - t.reader.dir = clientKeys - t.writer.dir = serverKeys - } - - return t -} - -type direction struct { - ivTag []byte - keyTag []byte - macKeyTag []byte -} - -var ( - serverKeys = direction{[]byte{'B'}, []byte{'D'}, []byte{'F'}} - clientKeys = direction{[]byte{'A'}, []byte{'C'}, []byte{'E'}} -) - -// generateKeys generates key material for IV, MAC and encryption. -func generateKeys(d direction, algs directionAlgorithms, kex *kexResult) (iv, key, macKey []byte) { - cipherMode := cipherModes[algs.Cipher] - macMode := macModes[algs.MAC] - - iv = make([]byte, cipherMode.ivSize) - key = make([]byte, cipherMode.keySize) - macKey = make([]byte, macMode.keySize) - - generateKeyMaterial(iv, d.ivTag, kex) - generateKeyMaterial(key, d.keyTag, kex) - generateKeyMaterial(macKey, d.macKeyTag, kex) - return -} - -// setupKeys sets the cipher and MAC keys from kex.K, kex.H and sessionId, as -// described in RFC 4253, section 6.4. direction should either be serverKeys -// (to setup server->client keys) or clientKeys (for client->server keys). -func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) { - iv, key, macKey := generateKeys(d, algs, kex) - - if algs.Cipher == gcmCipherID { - return newGCMCipher(iv, key, macKey) - } - - if algs.Cipher == aes128cbcID { - return newAESCBCCipher(iv, key, macKey, algs) - } - - if algs.Cipher == tripledescbcID { - return newTripleDESCBCCipher(iv, key, macKey, algs) - } - - c := &streamPacketCipher{ - mac: macModes[algs.MAC].new(macKey), - } - c.macResult = make([]byte, c.mac.Size()) - - var err error - c.cipher, err = cipherModes[algs.Cipher].createStream(key, iv) - if err != nil { - return nil, err - } - - return c, nil -} - -// generateKeyMaterial fills out with key material generated from tag, K, H -// and sessionId, as specified in RFC 4253, section 7.2. 
-func generateKeyMaterial(out, tag []byte, r *kexResult) { - var digestsSoFar []byte - - h := r.Hash.New() - for len(out) > 0 { - h.Reset() - h.Write(r.K) - h.Write(r.H) - - if len(digestsSoFar) == 0 { - h.Write(tag) - h.Write(r.SessionID) - } else { - h.Write(digestsSoFar) - } - - digest := h.Sum(nil) - n := copy(out, digest) - out = out[n:] - if len(out) > 0 { - digestsSoFar = append(digestsSoFar, digest...) - } - } -} - -const packageVersion = "SSH-2.0-Go" - -// Sends and receives a version line. The versionLine string should -// be US ASCII, start with "SSH-2.0-", and should not include a -// newline. exchangeVersions returns the other side's version line. -func exchangeVersions(rw io.ReadWriter, versionLine []byte) (them []byte, err error) { - // Contrary to the RFC, we do not ignore lines that don't - // start with "SSH-2.0-" to make the library usable with - // nonconforming servers. - for _, c := range versionLine { - // The spec disallows non US-ASCII chars, and - // specifically forbids null chars. - if c < 32 { - return nil, errors.New("ssh: junk character in version line") - } - } - if _, err = rw.Write(append(versionLine, '\r', '\n')); err != nil { - return - } - - them, err = readVersion(rw) - return them, err -} - -// maxVersionStringBytes is the maximum number of bytes that we'll -// accept as a version string. RFC 4253 section 4.2 limits this at 255 -// chars -const maxVersionStringBytes = 255 - -// Read version string as specified by RFC 4253, section 4.2. -func readVersion(r io.Reader) ([]byte, error) { - versionString := make([]byte, 0, 64) - var ok bool - var buf [1]byte - - for len(versionString) < maxVersionStringBytes { - _, err := io.ReadFull(r, buf[:]) - if err != nil { - return nil, err - } - // The RFC says that the version should be terminated with \r\n - // but several SSH servers actually only send a \n. - if buf[0] == '\n' { - ok = true - break - } - - // non ASCII chars are disallowed, but we are lenient, - // since Go doesn't use null-terminated strings. - - // The RFC allows a comment after a space, however, - // all of it (version and comments) goes into the - // session hash. - versionString = append(versionString, buf[0]) - } - - if !ok { - return nil, errors.New("ssh: overflow reading version string") - } - - // There might be a '\r' on the end which we should remove. 
- if len(versionString) > 0 && versionString[len(versionString)-1] == '\r' { - versionString = versionString[:len(versionString)-1] - } - return versionString, nil -} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index cfac4a440..74eae9b67 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -899,6 +899,7 @@ func Getpgrp() (pid int) { //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) +//sysnb Getsid(pid int) (sid int, err error) //sysnb Gettid() (tid int) //sys Getxattr(path string, attr string, dest []byte) (sz int, err error) //sys InotifyAddWatch(fd int, pathname string, mask uint32) (watchdesc int, err error) @@ -911,7 +912,7 @@ func Getpgrp() (pid int) { //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT -//sysnb prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) = SYS_PRLIMIT64 +//sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) //sys read(fd int, p []byte) (n int, err error) //sys Removexattr(path string, attr string) (err error) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index 80f6a1b0a..fa92387b1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 078c8f05a..b34d5c26f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := 
RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 76e5f7c0b..2e5cb3984 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 72b79470a..0d584cc0d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 
ba55509ea..bf6f3603b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 2b1cc8473..8c86bd70b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 25f39db9d..f5d488b4a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old 
*Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 70702b516..5183711ec 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 94b93d3d0..4c7ed08cc 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 774b10ed8..beb83e4fd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -572,6 +572,17 @@ func Getrusage(who int, rusage *Rusage) (err error) { 
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getsid(pid int) (sid int, err error) { + r0, _, e1 := RawSyscall(SYS_GETSID, uintptr(pid), 0, 0) + sid = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Gettid() (tid int) { r0, _, _ := RawSyscall(SYS_GETTID, 0, 0, 0) tid = int(r0) @@ -762,8 +773,8 @@ func PivotRoot(newroot string, putold string) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func prlimit(pid int, resource int, old *Rlimit, newlimit *Rlimit) (err error) { - _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(newlimit)), 0, 0) +func prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) { + _, _, e1 := RawSyscall6(SYS_PRLIMIT64, uintptr(pid), uintptr(resource), uintptr(unsafe.Pointer(newlimit)), uintptr(unsafe.Pointer(old)), 0, 0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 817ac9c29..35f11bd1b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,6 +1,6 @@ // +build arm,linux // Created by cgo -godefs - DO NOT EDIT -// cgo -godefs types_linux.go +// cgo -godefs types_linux.go | go run mkpost.go package unix @@ -155,6 +155,15 @@ type Flock_t struct { Pad_cgo_1 [4]byte } +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Family uint16 Port uint16 diff --git a/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/golang.org/x/text/unicode/norm/readwriter.go index 4fa0e04b2..d926ee903 100644 --- a/vendor/golang.org/x/text/unicode/norm/readwriter.go +++ b/vendor/golang.org/x/text/unicode/norm/readwriter.go @@ -112,7 +112,6 @@ func (r *normReader) Read(p []byte) (int, error) { } } } - panic("should not reach here") } // Reader returns a new reader that implements Read diff --git a/vendor/gopkg.in/tylerb/graceful.v1/.gitignore b/vendor/gopkg.in/tylerb/graceful.v1/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/gopkg.in/tylerb/graceful.v1/.travis.yml b/vendor/gopkg.in/tylerb/graceful.v1/.travis.yml new file mode 100644 index 000000000..66fdff76d --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/.travis.yml @@ -0,0 +1,13 @@ +language: go +sudo: false +go: + - 1.7 + - 1.6.2 + - 1.5.4 + - 1.4.3 + - 1.3.3 +before_install: + - go get github.com/mattn/goveralls + - go get golang.org/x/tools/cmd/cover +script: + - $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/gopkg.in/tylerb/graceful.v1/LICENSE b/vendor/gopkg.in/tylerb/graceful.v1/LICENSE new file mode 100644 index 000000000..a4f2f281b --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Tyler Bunnell + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated 
documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/tylerb/graceful.v1/README.md b/vendor/gopkg.in/tylerb/graceful.v1/README.md new file mode 100644 index 000000000..328c3acf8 --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/README.md @@ -0,0 +1,152 @@ +graceful [![GoDoc](https://godoc.org/github.com/tylerb/graceful?status.png)](http://godoc.org/github.com/tylerb/graceful) [![Build Status](https://travis-ci.org/tylerb/graceful.svg?branch=master)](https://travis-ci.org/tylerb/graceful) [![Coverage Status](https://coveralls.io/repos/tylerb/graceful/badge.svg)](https://coveralls.io/r/tylerb/graceful) [![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/tylerb/graceful?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) +======== + +Graceful is a Go 1.3+ package enabling graceful shutdown of http.Handler servers. + +## Installation + +To install, simply execute: + +``` +go get gopkg.in/tylerb/graceful.v1 +``` + +I am using [gopkg.in](http://labix.org/gopkg.in) to control releases. + +## Usage + +Using Graceful is easy. Simply create your http.Handler and pass it to the `Run` function: + +```go +package main + +import ( + "gopkg.in/tylerb/graceful.v1" + "net/http" + "fmt" + "time" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + graceful.Run(":3001",10*time.Second,mux) +} +``` + +Another example, using [Negroni](https://github.com/codegangsta/negroni), functions in much the same manner: + +```go +package main + +import ( + "github.com/codegangsta/negroni" + "gopkg.in/tylerb/graceful.v1" + "net/http" + "fmt" + "time" +) + +func main() { + mux := http.NewServeMux() + mux.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "Welcome to the home page!") + }) + + n := negroni.Classic() + n.UseHandler(mux) + //n.Run(":3000") + graceful.Run(":3001",10*time.Second,n) +} +``` + +In addition to Run there are the http.Server counterparts ListenAndServe, ListenAndServeTLS and Serve, which allow you to configure HTTPS, custom timeouts and error handling. +Graceful may also be used by instantiating its Server type directly, which embeds an http.Server: + +```go +mux := // ... + +srv := &graceful.Server{ + Timeout: 10 * time.Second, + + Server: &http.Server{ + Addr: ":1234", + Handler: mux, + }, +} + +srv.ListenAndServe() +``` + +This form allows you to set the ConnState callback, which works in the same way as in http.Server: + +```go +mux := // ... 
+ +srv := &graceful.Server{ + Timeout: 10 * time.Second, + + ConnState: func(conn net.Conn, state http.ConnState) { + // conn has a new state + }, + + Server: &http.Server{ + Addr: ":1234", + Handler: mux, + }, +} + +srv.ListenAndServe() +``` + +## Behaviour + +When Graceful is sent a SIGINT or SIGTERM (possibly from ^C or a kill command), it: + +1. Disables keepalive connections. +2. Closes the listening socket, allowing another process to listen on that port immediately. +3. Starts a timer of `timeout` duration to give active requests a chance to finish. +4. When timeout expires, closes all active connections. +5. Closes the `stopChan`, waking up any blocking goroutines. +6. Returns from the function, allowing the server to terminate. + +## Notes + +If the `timeout` argument to `Run` is 0, the server never times out, allowing all active requests to complete. + +If you wish to stop the server in some way other than an OS signal, you may call the `Stop()` function. +This function stops the server, gracefully, using the new timeout value you provide. The `StopChan()` function +returns a channel on which you can block while waiting for the server to stop. This channel will be closed when +the server is stopped, allowing your execution to proceed. Multiple goroutines can block on this channel at the +same time and all will be signalled when stopping is complete. + +### Important things to note when setting `timeout` to 0: + +If you set the `timeout` to `0`, it waits for all connections to the server to disconnect before shutting down. +This means that even though requests over a connection have finished, it is possible for the client to hold the +connection open and block the server from shutting down indefinitely. + +This is especially evident when graceful is used to run HTTP/2 servers. Clients like Chrome and Firefox have been +observed to hold onto the open connection indefinitely over HTTP/2, preventing the server from shutting down. In +addition, there is also the risk of malicious clients holding and keeping the connection alive. + +It is understandable that sometimes, you might want to wait for the client indefinitely because they might be +uploading large files. In these type of cases, it is recommended that you set a reasonable timeout to kill the +connection, and have the client perform resumable uploads. For example, the client can divide the file into chunks +and reupload chunks that were in transit when the connection was terminated. + +## Contributing + +If you would like to contribute, please: + +1. Create a GitHub issue regarding the contribution. Features and bugs should be discussed beforehand. +2. Fork the repository. +3. Create a pull request with your solution. This pull request should reference and close the issues (Fix #2). + +All pull requests should: + +1. Pass [gometalinter -t .](https://github.com/alecthomas/gometalinter) with no warnings. +2. Be `go fmt` formatted. diff --git a/vendor/gopkg.in/tylerb/graceful.v1/graceful.go b/vendor/gopkg.in/tylerb/graceful.v1/graceful.go new file mode 100644 index 000000000..a5e2395e0 --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/graceful.go @@ -0,0 +1,487 @@ +package graceful + +import ( + "crypto/tls" + "log" + "net" + "net/http" + "os" + "os/signal" + "sync" + "syscall" + "time" +) + +// Server wraps an http.Server with graceful connection handling. +// It may be used directly in the same way as http.Server, or may +// be constructed with the global functions in this package. 
+// +// Example: +// srv := &graceful.Server{ +// Timeout: 5 * time.Second, +// Server: &http.Server{Addr: ":1234", Handler: handler}, +// } +// srv.ListenAndServe() +type Server struct { + *http.Server + + // Timeout is the duration to allow outstanding requests to survive + // before forcefully terminating them. + Timeout time.Duration + + // Limit the number of outstanding requests + ListenLimit int + + // TCPKeepAlive sets the TCP keep-alive timeouts on accepted + // connections. It prunes dead TCP connections ( e.g. closing + // laptop mid-download) + TCPKeepAlive time.Duration + + // ConnState specifies an optional callback function that is + // called when a client connection changes state. This is a proxy + // to the underlying http.Server's ConnState, and the original + // must not be set directly. + ConnState func(net.Conn, http.ConnState) + + // BeforeShutdown is an optional callback function that is called + // before the listener is closed. Returns true if shutdown is allowed + BeforeShutdown func() bool + + // ShutdownInitiated is an optional callback function that is called + // when shutdown is initiated. It can be used to notify the client + // side of long lived connections (e.g. websockets) to reconnect. + ShutdownInitiated func() + + // NoSignalHandling prevents graceful from automatically shutting down + // on SIGINT and SIGTERM. If set to true, you must shut down the server + // manually with Stop(). + NoSignalHandling bool + + // Logger used to notify of errors on startup and on stop. + Logger *log.Logger + + // LogFunc can be assigned with a logging function of your choice, allowing + // you to use whatever logging approach you would like + LogFunc func(format string, args ...interface{}) + + // Interrupted is true if the server is handling a SIGINT or SIGTERM + // signal and is thus shutting down. + Interrupted bool + + // interrupt signals the listener to stop serving connections, + // and the server to shut down. + interrupt chan os.Signal + + // stopLock is used to protect against concurrent calls to Stop + stopLock sync.Mutex + + // stopChan is the channel on which callers may block while waiting for + // the server to stop. + stopChan chan struct{} + + // chanLock is used to protect access to the various channel constructors. + chanLock sync.RWMutex + + // connections holds all connections managed by graceful + connections map[net.Conn]struct{} + + // idleConnections holds all idle connections managed by graceful + idleConnections map[net.Conn]struct{} +} + +// Run serves the http.Handler with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func Run(addr string, timeout time.Duration, n http.Handler) { + srv := &Server{ + Timeout: timeout, + TCPKeepAlive: 3 * time.Minute, + Server: &http.Server{Addr: addr, Handler: n}, + // Logger: DefaultLogger(), + } + + if err := srv.ListenAndServe(); err != nil { + if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") { + srv.logf("%s", err) + os.Exit(1) + } + } + +} + +// RunWithErr is an alternative version of Run function which can return error. +// +// Unlike Run this version will not exit the program if an error is encountered but will +// return it instead. 
+func RunWithErr(addr string, timeout time.Duration, n http.Handler) error { + srv := &Server{ + Timeout: timeout, + TCPKeepAlive: 3 * time.Minute, + Server: &http.Server{Addr: addr, Handler: n}, + Logger: DefaultLogger(), + } + + return srv.ListenAndServe() +} + +// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func ListenAndServe(server *http.Server, timeout time.Duration) error { + srv := &Server{Timeout: timeout, Server: server, Logger: DefaultLogger()} + return srv.ListenAndServe() +} + +// ListenAndServe is equivalent to http.Server.ListenAndServe with graceful shutdown enabled. +func (srv *Server) ListenAndServe() error { + // Create the listener so we can control their lifetime + addr := srv.Addr + if addr == "" { + addr = ":http" + } + conn, err := srv.newTCPListener(addr) + if err != nil { + return err + } + + return srv.Serve(conn) +} + +// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func ListenAndServeTLS(server *http.Server, certFile, keyFile string, timeout time.Duration) error { + srv := &Server{Timeout: timeout, Server: server, Logger: DefaultLogger()} + return srv.ListenAndServeTLS(certFile, keyFile) +} + +// ListenTLS is a convenience method that creates an https listener using the +// provided cert and key files. Use this method if you need access to the +// listener object directly. When ready, pass it to the Serve method. +func (srv *Server) ListenTLS(certFile, keyFile string) (net.Listener, error) { + // Create the listener ourselves so we can control its lifetime + addr := srv.Addr + if addr == "" { + addr = ":https" + } + + config := &tls.Config{} + if srv.TLSConfig != nil { + *config = *srv.TLSConfig + } + + var err error + config.Certificates = make([]tls.Certificate, 1) + config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + + // Enable http2 + enableHTTP2ForTLSConfig(config) + + conn, err := srv.newTCPListener(addr) + if err != nil { + return nil, err + } + + srv.TLSConfig = config + + tlsListener := tls.NewListener(conn, config) + return tlsListener, nil +} + +// Enable HTTP2ForTLSConfig explicitly enables http/2 for a TLS Config. This is due to changes in Go 1.7 where +// http servers are no longer automatically configured to enable http/2 if the server's TLSConfig is set. +// See https://github.com/golang/go/issues/15908 +func enableHTTP2ForTLSConfig(t *tls.Config) { + + if TLSConfigHasHTTP2Enabled(t) { + return + } + + t.NextProtos = append(t.NextProtos, "h2") +} + +// TLSConfigHasHTTP2Enabled checks to see if a given TLS Config has http2 enabled. +func TLSConfigHasHTTP2Enabled(t *tls.Config) bool { + for _, value := range t.NextProtos { + if value == "h2" { + return true + } + } + return false +} + +// ListenAndServeTLS is equivalent to http.Server.ListenAndServeTLS with graceful shutdown enabled. 
+func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { + l, err := srv.ListenTLS(certFile, keyFile) + if err != nil { + return err + } + + return srv.Serve(l) +} + +// ListenAndServeTLSConfig can be used with an existing TLS config and is equivalent to +// http.Server.ListenAndServeTLS with graceful shutdown enabled, +func (srv *Server) ListenAndServeTLSConfig(config *tls.Config) error { + addr := srv.Addr + if addr == "" { + addr = ":https" + } + + conn, err := srv.newTCPListener(addr) + if err != nil { + return err + } + + srv.TLSConfig = config + + tlsListener := tls.NewListener(conn, config) + return srv.Serve(tlsListener) +} + +// Serve is equivalent to http.Server.Serve with graceful shutdown enabled. +// +// timeout is the duration to wait until killing active requests and stopping the server. +// If timeout is 0, the server never times out. It waits for all active requests to finish. +func Serve(server *http.Server, l net.Listener, timeout time.Duration) error { + srv := &Server{Timeout: timeout, Server: server, Logger: DefaultLogger()} + + return srv.Serve(l) +} + +// Serve is equivalent to http.Server.Serve with graceful shutdown enabled. +func (srv *Server) Serve(listener net.Listener) error { + + if srv.ListenLimit != 0 { + listener = LimitListener(listener, srv.ListenLimit) + } + + // Make our stopchan + srv.StopChan() + + // Track connection state + add := make(chan net.Conn) + idle := make(chan net.Conn) + active := make(chan net.Conn) + remove := make(chan net.Conn) + + srv.Server.ConnState = func(conn net.Conn, state http.ConnState) { + switch state { + case http.StateNew: + add <- conn + case http.StateActive: + active <- conn + case http.StateIdle: + idle <- conn + case http.StateClosed, http.StateHijacked: + remove <- conn + } + + srv.stopLock.Lock() + defer srv.stopLock.Unlock() + + if srv.ConnState != nil { + srv.ConnState(conn, state) + } + } + + // Manage open connections + shutdown := make(chan chan struct{}) + kill := make(chan struct{}) + go srv.manageConnections(add, idle, active, remove, shutdown, kill) + + interrupt := srv.interruptChan() + // Set up the interrupt handler + if !srv.NoSignalHandling { + signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM) + } + quitting := make(chan struct{}) + go srv.handleInterrupt(interrupt, quitting, listener) + + // Serve with graceful listener. + // Execution blocks here until listener.Close() is called, above. + err := srv.Server.Serve(listener) + if err != nil { + // If the underlying listening is closed, Serve returns an error + // complaining about listening on a closed socket. This is expected, so + // let's ignore the error if we are the ones who explicitly closed the + // socket. + select { + case <-quitting: + err = nil + default: + } + } + + srv.shutdown(shutdown, kill) + + return err +} + +// Stop instructs the type to halt operations and close +// the stop channel when it is finished. +// +// timeout is grace period for which to wait before shutting +// down the server. The timeout value passed here will override the +// timeout given when constructing the server, as this is an explicit +// command to stop the server. +func (srv *Server) Stop(timeout time.Duration) { + srv.stopLock.Lock() + defer srv.stopLock.Unlock() + + srv.Timeout = timeout + interrupt := srv.interruptChan() + interrupt <- syscall.SIGINT +} + +// StopChan gets the stop channel which will block until +// stopping has completed, at which point it is closed. +// Callers should never close the stop channel. 
+func (srv *Server) StopChan() <-chan struct{} { + srv.chanLock.Lock() + defer srv.chanLock.Unlock() + + if srv.stopChan == nil { + srv.stopChan = make(chan struct{}) + } + return srv.stopChan +} + +// DefaultLogger returns the logger used by Run, RunWithErr, ListenAndServe, ListenAndServeTLS and Serve. +// The logger outputs to STDERR by default. +func DefaultLogger() *log.Logger { + return log.New(os.Stderr, "[graceful] ", 0) +} + +func (srv *Server) manageConnections(add, idle, active, remove chan net.Conn, shutdown chan chan struct{}, kill chan struct{}) { + var done chan struct{} + srv.connections = map[net.Conn]struct{}{} + srv.idleConnections = map[net.Conn]struct{}{} + for { + select { + case conn := <-add: + srv.connections[conn] = struct{}{} + case conn := <-idle: + srv.idleConnections[conn] = struct{}{} + case conn := <-active: + delete(srv.idleConnections, conn) + case conn := <-remove: + delete(srv.connections, conn) + delete(srv.idleConnections, conn) + if done != nil && len(srv.connections) == 0 { + done <- struct{}{} + return + } + case done = <-shutdown: + if len(srv.connections) == 0 && len(srv.idleConnections) == 0 { + done <- struct{}{} + return + } + // a shutdown request has been received. if we have open idle + // connections, we must close all of them now. this prevents idle + // connections from holding the server open while waiting for them to + // hit their idle timeout. + for k := range srv.idleConnections { + if err := k.Close(); err != nil { + srv.logf("[ERROR] %s", err) + } + } + case <-kill: + srv.stopLock.Lock() + defer srv.stopLock.Unlock() + + srv.Server.ConnState = nil + for k := range srv.connections { + if err := k.Close(); err != nil { + srv.logf("[ERROR] %s", err) + } + } + return + } + } +} + +func (srv *Server) interruptChan() chan os.Signal { + srv.chanLock.Lock() + defer srv.chanLock.Unlock() + + if srv.interrupt == nil { + srv.interrupt = make(chan os.Signal, 1) + } + + return srv.interrupt +} + +func (srv *Server) handleInterrupt(interrupt chan os.Signal, quitting chan struct{}, listener net.Listener) { + for _ = range interrupt { + if srv.Interrupted { + srv.logf("already shutting down") + continue + } + srv.logf("shutdown initiated") + srv.Interrupted = true + if srv.BeforeShutdown != nil { + if !srv.BeforeShutdown() { + srv.Interrupted = false + continue + } + } + + close(quitting) + srv.SetKeepAlivesEnabled(false) + if err := listener.Close(); err != nil { + srv.logf("[ERROR] %s", err) + } + + if srv.ShutdownInitiated != nil { + srv.ShutdownInitiated() + } + } +} + +func (srv *Server) logf(format string, args ...interface{}) { + if srv.LogFunc != nil { + srv.LogFunc(format, args...) + } else if srv.Logger != nil { + srv.Logger.Printf(format, args...) + } +} + +func (srv *Server) shutdown(shutdown chan chan struct{}, kill chan struct{}) { + // Request done notification + done := make(chan struct{}) + shutdown <- done + + if srv.Timeout > 0 { + select { + case <-done: + case <-time.After(srv.Timeout): + close(kill) + } + } else { + <-done + } + // Close the stopChan to wake up any blocked goroutines. 
+ srv.chanLock.Lock() + if srv.stopChan != nil { + close(srv.stopChan) + } + srv.chanLock.Unlock() +} + +func (srv *Server) newTCPListener(addr string) (net.Listener, error) { + conn, err := net.Listen("tcp", addr) + if err != nil { + return conn, err + } + if srv.TCPKeepAlive != 0 { + conn = keepAliveListener{conn, srv.TCPKeepAlive} + } + return conn, nil +} diff --git a/vendor/gopkg.in/tylerb/graceful.v1/keepalive_listener.go b/vendor/gopkg.in/tylerb/graceful.v1/keepalive_listener.go new file mode 100644 index 000000000..1484bc213 --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/keepalive_listener.go @@ -0,0 +1,32 @@ +package graceful + +import ( + "net" + "time" +) + +type keepAliveConn interface { + SetKeepAlive(bool) error + SetKeepAlivePeriod(d time.Duration) error +} + +// keepAliveListener sets TCP keep-alive timeouts on accepted +// connections. It's used by ListenAndServe and ListenAndServeTLS so +// dead TCP connections (e.g. closing laptop mid-download) eventually +// go away. +type keepAliveListener struct { + net.Listener + keepAlivePeriod time.Duration +} + +func (ln keepAliveListener) Accept() (net.Conn, error) { + c, err := ln.Listener.Accept() + if err != nil { + return nil, err + } + + kac := c.(keepAliveConn) + kac.SetKeepAlive(true) + kac.SetKeepAlivePeriod(ln.keepAlivePeriod) + return c, nil +} diff --git a/vendor/gopkg.in/tylerb/graceful.v1/limit_listen.go b/vendor/gopkg.in/tylerb/graceful.v1/limit_listen.go new file mode 100644 index 000000000..ce32ce992 --- /dev/null +++ b/vendor/gopkg.in/tylerb/graceful.v1/limit_listen.go @@ -0,0 +1,77 @@ +// Copyright 2013 The etcd Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package graceful + +import ( + "errors" + "net" + "sync" + "time" +) + +// ErrNotTCP indicates that network connection is not a TCP connection. +var ErrNotTCP = errors.New("only tcp connections have keepalive") + +// LimitListener returns a Listener that accepts at most n simultaneous +// connections from the provided Listener. 
+func LimitListener(l net.Listener, n int) net.Listener { + return &limitListener{l, make(chan struct{}, n)} +} + +type limitListener struct { + net.Listener + sem chan struct{} +} + +func (l *limitListener) acquire() { l.sem <- struct{}{} } +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + l.acquire() + c, err := l.Listener.Accept() + if err != nil { + l.release() + return nil, err + } + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} + +func (l *limitListenerConn) SetKeepAlive(doKeepAlive bool) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlive(doKeepAlive) +} + +func (l *limitListenerConn) SetKeepAlivePeriod(d time.Duration) error { + tcpc, ok := l.Conn.(*net.TCPConn) + if !ok { + return ErrNotTCP + } + return tcpc.SetKeepAlivePeriod(d) +}
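
The graceful.v1 package vendored above supplies the stop-and-wait server behaviour an HTTP-based plugin transport needs: a server that can be told to stop, drains in-flight requests within a grace period, and signals callers once shutdown has completed. The sketch below is only an illustration of that vendored API (graceful.Server, Serve, Stop, StopChan), not this patch's own rpc/server code; the socket path, handler body, and timings are made-up placeholders.

```go
package main

import (
	"fmt"
	"net"
	"net/http"
	"os"
	"time"

	"gopkg.in/tylerb/graceful.v1"
)

func main() {
	// Hypothetical listen address; a plugin would typically listen on a unix
	// socket under a discovery directory, but this exact path is illustrative.
	socketPath := "/tmp/example-plugin.sock"
	os.Remove(socketPath) // error ignored; the socket may simply not exist yet

	mux := http.NewServeMux()
	mux.HandleFunc("/rpc", func(w http.ResponseWriter, req *http.Request) {
		// Placeholder response; a real plugin would dispatch JSON-RPC here.
		fmt.Fprintln(w, `{"jsonrpc":"2.0","result":"ok","id":1}`)
	})

	srv := &graceful.Server{
		Timeout:          5 * time.Second, // grace period for in-flight requests
		NoSignalHandling: true,            // the caller decides when to stop
		Server:           &http.Server{Handler: mux},
	}

	listener, err := net.Listen("unix", socketPath)
	if err != nil {
		panic(err)
	}

	// StopChan is closed only after shutdown has fully completed, so callers
	// can block on it (a "stop, then await stopped" pattern).
	stopped := srv.StopChan()

	// Stop the server from another goroutine after a while.
	go func() {
		time.Sleep(30 * time.Second)
		srv.Stop(5 * time.Second)
	}()

	if err := srv.Serve(listener); err != nil {
		fmt.Println("serve error:", err)
	}
	<-stopped // returns only once graceful shutdown has finished
}
```

A likely reason to vendor graceful here (with Go of this era lacking a built-in http.Server shutdown) is exactly this Stop/StopChan pair: it lets a plugin process expose "stop" and "wait until stopped" as separate operations while the library handles disabling keep-alives, closing the listener, and enforcing the drain timeout, as described in the package README above.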