From c85d508a65a45655f7f62c3d14776efb867f6646 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Thu, 20 Jun 2019 23:21:21 +0800 Subject: [PATCH 1/5] fix error log when loading issues caused by a xorm bug --- go.mod | 12 +- go.sum | 21 +- models/action.go | 2 +- models/issue.go | 3 +- models/issue_comment.go | 2 +- models/issue_list.go | 76 +- models/issue_reaction.go | 2 +- models/issue_tracked_time.go | 2 +- models/log.go | 2 +- models/login_source.go | 2 +- models/migrations/v31.go | 2 +- models/migrations/v38.go | 2 +- models/migrations/v75.go | 2 +- models/migrations/v78.go | 2 +- models/migrations/v85.go | 2 +- models/models.go | 2 +- models/org.go | 2 +- models/release.go | 2 +- models/repo.go | 2 +- models/repo_list.go | 2 +- models/repo_unit.go | 2 +- models/review.go | 4 +- models/ssh_key.go | 2 +- models/topic.go | 2 +- models/unit_tests.go | 2 +- models/user.go | 4 +- routers/admin/auths.go | 2 +- vendor/github.com/go-xorm/builder/go.mod | 1 - vendor/github.com/go-xorm/core/circle.yml | 15 - vendor/github.com/go-xorm/core/db.go | 401 ---------- vendor/github.com/go-xorm/core/go.mod | 1 - vendor/github.com/go-xorm/xorm/.drone.yml | 4 +- vendor/github.com/go-xorm/xorm/README.md | 14 +- vendor/github.com/go-xorm/xorm/README_CN.md | 10 +- vendor/github.com/go-xorm/xorm/cache_lru.go | 2 +- .../go-xorm/xorm/cache_memory_store.go | 2 +- vendor/github.com/go-xorm/xorm/circle.yml | 41 -- .../github.com/go-xorm/xorm/dialect_mssql.go | 26 +- .../github.com/go-xorm/xorm/dialect_mysql.go | 5 +- .../github.com/go-xorm/xorm/dialect_oracle.go | 2 +- .../go-xorm/xorm/dialect_postgres.go | 18 +- .../go-xorm/xorm/dialect_sqlite3.go | 2 +- vendor/github.com/go-xorm/xorm/engine.go | 24 +- vendor/github.com/go-xorm/xorm/engine_cond.go | 13 +- .../github.com/go-xorm/xorm/engine_context.go | 28 + .../github.com/go-xorm/xorm/engine_group.go | 17 +- .../github.com/go-xorm/xorm/engine_table.go | 4 +- vendor/github.com/go-xorm/xorm/error.go | 2 + vendor/github.com/go-xorm/xorm/go.mod | 22 +- vendor/github.com/go-xorm/xorm/go.sum | 50 +- vendor/github.com/go-xorm/xorm/helpers.go | 2 +- vendor/github.com/go-xorm/xorm/interface.go | 12 +- vendor/github.com/go-xorm/xorm/json.go | 31 + vendor/github.com/go-xorm/xorm/logger.go | 2 +- vendor/github.com/go-xorm/xorm/rows.go | 37 +- vendor/github.com/go-xorm/xorm/session.go | 36 +- .../github.com/go-xorm/xorm/session_cols.go | 2 +- .../github.com/go-xorm/xorm/session_cond.go | 2 +- .../xorm/{context.go => session_context.go} | 13 +- .../go-xorm/xorm/session_convert.go | 23 +- .../github.com/go-xorm/xorm/session_delete.go | 6 +- .../github.com/go-xorm/xorm/session_exist.go | 18 +- .../github.com/go-xorm/xorm/session_find.go | 8 +- vendor/github.com/go-xorm/xorm/session_get.go | 6 +- .../github.com/go-xorm/xorm/session_insert.go | 175 ++++- .../go-xorm/xorm/session_iterate.go | 4 + .../github.com/go-xorm/xorm/session_query.go | 26 +- vendor/github.com/go-xorm/xorm/session_raw.go | 34 +- .../github.com/go-xorm/xorm/session_schema.go | 4 +- vendor/github.com/go-xorm/xorm/session_tx.go | 2 +- .../github.com/go-xorm/xorm/session_update.go | 54 +- vendor/github.com/go-xorm/xorm/statement.go | 48 +- vendor/github.com/go-xorm/xorm/syslogger.go | 2 +- vendor/github.com/go-xorm/xorm/tag.go | 2 +- vendor/github.com/go-xorm/xorm/test_mssql.sh | 2 +- 
vendor/github.com/go-xorm/xorm/test_tidb.sh | 1 + vendor/github.com/go-xorm/xorm/types.go | 6 +- vendor/github.com/go-xorm/xorm/xorm.go | 20 +- .../appengine/internal/api.go | 17 +- .../appengine/internal/api_pre17.go | 682 ------------------ .../appengine/internal/identity.go | 47 +- .../appengine/internal/identity_classic.go | 4 + .../appengine/internal/identity_flex.go | 11 + .../appengine/internal/main.go | 1 + .../appengine/internal/main_common.go | 7 + .../appengine/internal/main_vm.go | 21 + vendor/modules.txt | 16 +- .../go-xorm => xorm.io}/builder/.drone.yml | 0 .../go-xorm => xorm.io}/builder/LICENSE | 0 .../go-xorm => xorm.io}/builder/README.md | 0 .../go-xorm => xorm.io}/builder/builder.go | 0 .../builder/builder_delete.go | 0 .../builder/builder_insert.go | 0 .../builder/builder_limit.go | 0 .../builder/builder_select.go | 0 .../builder/builder_union.go | 0 .../builder/builder_update.go | 0 .../go-xorm => xorm.io}/builder/cond.go | 0 .../go-xorm => xorm.io}/builder/cond_and.go | 0 .../builder/cond_between.go | 0 .../builder/cond_compare.go | 0 .../go-xorm => xorm.io}/builder/cond_eq.go | 0 .../go-xorm => xorm.io}/builder/cond_expr.go | 0 vendor/xorm.io/builder/cond_if.go | 49 ++ .../go-xorm => xorm.io}/builder/cond_in.go | 0 .../go-xorm => xorm.io}/builder/cond_like.go | 0 .../go-xorm => xorm.io}/builder/cond_neq.go | 0 .../go-xorm => xorm.io}/builder/cond_not.go | 0 .../go-xorm => xorm.io}/builder/cond_notin.go | 0 .../go-xorm => xorm.io}/builder/cond_null.go | 0 .../go-xorm => xorm.io}/builder/cond_or.go | 0 .../go-xorm => xorm.io}/builder/doc.go | 0 .../go-xorm => xorm.io}/builder/error.go | 0 vendor/xorm.io/builder/go.mod | 6 + vendor/xorm.io/builder/go.sum | 9 + .../go-xorm => xorm.io}/builder/sql.go | 0 .../builder/string_builder.go | 0 .../go-xorm => xorm.io}/core/.gitignore | 0 .../go-xorm => xorm.io}/core/LICENSE | 0 .../go-xorm => xorm.io}/core/README.md | 0 .../go-xorm => xorm.io}/core/benchmark.sh | 0 .../go-xorm => xorm.io}/core/cache.go | 4 + .../go-xorm => xorm.io}/core/column.go | 11 +- .../go-xorm => xorm.io}/core/converstion.go | 4 + vendor/xorm.io/core/db.go | 223 ++++++ .../go-xorm => xorm.io}/core/dialect.go | 4 + .../go-xorm => xorm.io}/core/driver.go | 4 + .../go-xorm => xorm.io}/core/error.go | 4 + .../go-xorm => xorm.io}/core/filter.go | 4 + vendor/xorm.io/core/go.mod | 7 + vendor/xorm.io/core/go.sum | 9 + .../go-xorm => xorm.io}/core/ilogger.go | 4 + .../go-xorm => xorm.io}/core/index.go | 16 +- .../go-xorm => xorm.io}/core/mapper.go | 4 + .../go-xorm => xorm.io}/core/pk.go | 4 + .../go-xorm => xorm.io}/core/rows.go | 4 + .../go-xorm => xorm.io}/core/scan.go | 11 + vendor/xorm.io/core/stmt.go | 165 +++++ .../go-xorm => xorm.io}/core/table.go | 4 + vendor/xorm.io/core/tx.go | 153 ++++ .../go-xorm => xorm.io}/core/type.go | 18 +- 141 files changed, 1462 insertions(+), 1509 deletions(-) delete mode 100644 vendor/github.com/go-xorm/builder/go.mod delete mode 100644 vendor/github.com/go-xorm/core/circle.yml delete mode 100644 vendor/github.com/go-xorm/core/db.go delete mode 100644 vendor/github.com/go-xorm/core/go.mod delete mode 100644 vendor/github.com/go-xorm/xorm/circle.yml create mode 100644 vendor/github.com/go-xorm/xorm/engine_context.go create mode 100644 vendor/github.com/go-xorm/xorm/json.go rename vendor/github.com/go-xorm/xorm/{context.go => session_context.go} (60%) create mode 100644 vendor/github.com/go-xorm/xorm/test_tidb.sh delete mode 100644 
vendor/google.golang.org/appengine/internal/api_pre17.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_flex.go create mode 100644 vendor/google.golang.org/appengine/internal/main_common.go rename vendor/{github.com/go-xorm => xorm.io}/builder/.drone.yml (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/LICENSE (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/README.md (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder_delete.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder_insert.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder_limit.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder_select.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder_union.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/builder_update.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_and.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_between.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_compare.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_eq.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_expr.go (100%) create mode 100644 vendor/xorm.io/builder/cond_if.go rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_in.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_like.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_neq.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_not.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_notin.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_null.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/cond_or.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/doc.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/error.go (100%) create mode 100644 vendor/xorm.io/builder/go.mod create mode 100644 vendor/xorm.io/builder/go.sum rename vendor/{github.com/go-xorm => xorm.io}/builder/sql.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/builder/string_builder.go (100%) rename vendor/{github.com/go-xorm => xorm.io}/core/.gitignore (100%) rename vendor/{github.com/go-xorm => xorm.io}/core/LICENSE (100%) rename vendor/{github.com/go-xorm => xorm.io}/core/README.md (100%) rename vendor/{github.com/go-xorm => xorm.io}/core/benchmark.sh (100%) rename vendor/{github.com/go-xorm => xorm.io}/core/cache.go (92%) rename vendor/{github.com/go-xorm => xorm.io}/core/column.go (88%) rename vendor/{github.com/go-xorm => xorm.io}/core/converstion.go (59%) create mode 100644 vendor/xorm.io/core/db.go rename vendor/{github.com/go-xorm => xorm.io}/core/dialect.go (97%) rename vendor/{github.com/go-xorm => xorm.io}/core/driver.go (75%) rename vendor/{github.com/go-xorm => xorm.io}/core/error.go (51%) rename vendor/{github.com/go-xorm => xorm.io}/core/filter.go (90%) create mode 100644 vendor/xorm.io/core/go.mod create mode 100644 vendor/xorm.io/core/go.sum rename vendor/{github.com/go-xorm => xorm.io}/core/ilogger.go (78%) rename vendor/{github.com/go-xorm => 
xorm.io}/core/index.go (78%) rename vendor/{github.com/go-xorm => xorm.io}/core/mapper.go (96%) rename vendor/{github.com/go-xorm => xorm.io}/core/pk.go (72%) rename vendor/{github.com/go-xorm => xorm.io}/core/rows.go (97%) rename vendor/{github.com/go-xorm => xorm.io}/core/scan.go (79%) create mode 100644 vendor/xorm.io/core/stmt.go rename vendor/{github.com/go-xorm => xorm.io}/core/table.go (95%) create mode 100644 vendor/xorm.io/core/tx.go rename vendor/{github.com/go-xorm => xorm.io}/core/type.go (92%) diff --git a/go.mod b/go.mod index b1a638ed74fb7..9d957cea45656 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( github.com/cznic/b v0.0.0-20181122101859-a26611c4d92d // indirect github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 // indirect github.com/cznic/strutil v0.0.0-20181122101858-275e90344537 // indirect - github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f + github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 // indirect github.com/emirpasic/gods v1.12.0 @@ -54,10 +54,9 @@ require ( github.com/go-macaron/session v0.0.0-20190131233854-0a0a789bf193 github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90 github.com/go-redis/redis v6.15.2+incompatible - github.com/go-sql-driver/mysql v1.4.0 - github.com/go-xorm/builder v0.3.3 - github.com/go-xorm/core v0.6.0 - github.com/go-xorm/xorm v0.0.0-20190116032649-a6300f2a45e0 + github.com/go-sql-driver/mysql v1.4.1 + github.com/go-xorm/core v0.6.0 // indirect + github.com/go-xorm/xorm v0.7.3-0.20190620151208-f1b4f8368459 github.com/gogits/chardet v0.0.0-20150115103509-2404f7772561 github.com/gogits/cron v0.0.0-20160810035002-7f3990acf183 github.com/gogo/protobuf v1.2.1 // indirect @@ -133,9 +132,12 @@ require ( gopkg.in/redis.v2 v2.3.2 // indirect gopkg.in/src-d/go-billy.v4 v4.3.0 gopkg.in/src-d/go-git.v4 v4.12.0 + gopkg.in/stretchr/testify.v1 v1.2.2 // indirect gopkg.in/testfixtures.v2 v2.5.0 mvdan.cc/xurls/v2 v2.0.0 strk.kbt.io/projects/go/libravatar v0.0.0-20160628055650-5eed7bff870a + xorm.io/builder v0.3.5 + xorm.io/core v0.6.3 ) replace ( diff --git a/go.sum b/go.sum index f6542fbe49dae..7e0fc6cb27785 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ cloud.google.com/go v0.30.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/PuerkitoBio/goquery v0.0.0-20170324135448-ed7d758e9a34 h1:UsHpWO0Elp6NaWVARdZHjiYwkhrspHVEGsyIKPb9OI8= @@ -119,15 +120,12 @@ github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDA github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f h1:fbIzwEaXt5b2bl9mm+PIufKTSGKk6ZuwSSTQ7iZj7Lo= github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-xorm/builder v0.3.2/go.mod h1:v8mE3MFBgtL+RGFNfUnAMUqqfk/Y4W5KuwCFQIEpQLk= -github.com/go-xorm/builder v0.3.3 
h1:v8grgrwOGv/iHXIEhIvOwHZIPLrpxRKSX8yWSMLFn/4= -github.com/go-xorm/builder v0.3.3/go.mod h1:v8mE3MFBgtL+RGFNfUnAMUqqfk/Y4W5KuwCFQIEpQLk= github.com/go-xorm/core v0.6.0 h1:tp6hX+ku4OD9khFZS8VGBDRY3kfVCtelPfmkgCyHxL0= github.com/go-xorm/core v0.6.0/go.mod h1:d8FJ9Br8OGyQl12MCclmYBuBqqxsyeedpXciV5Myih8= github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y= github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM= -github.com/go-xorm/xorm v0.0.0-20190116032649-a6300f2a45e0 h1:GBnJjWjp2SGXBZsyZfYksyp7QocvQwf9vZQ0NRN2FXM= -github.com/go-xorm/xorm v0.0.0-20190116032649-a6300f2a45e0/go.mod h1:EHS1htMQFptzMaIHKyzqpHGw6C9Rtug75nsq6DA9unI= +github.com/go-xorm/xorm v0.7.3-0.20190620151208-f1b4f8368459 h1:JGEuhH169J7Wtm1hN/HFOGENsAq+6FDHfuhGEZj/1e4= +github.com/go-xorm/xorm v0.7.3-0.20190620151208-f1b4f8368459/go.mod h1:UK1YDlWscDspd23xW9HC24749jhvwO6riZ/HUt3gbHQ= github.com/gogits/chardet v0.0.0-20150115103509-2404f7772561 h1:deE7ritpK04PgtpyVOS2TYcQEld9qLCD5b5EbVNOuLA= github.com/gogits/chardet v0.0.0-20150115103509-2404f7772561/go.mod h1:YgYOrVn3Nj9Tq0EvjmFbphRytDj7JNRoWSStJZWDJTQ= github.com/gogits/cron v0.0.0-20160810035002-7f3990acf183 h1:EBTlva3AOSb80G3JSwY6ZMdILEZJ1JKuewrbqrNjWuE= @@ -170,8 +168,8 @@ github.com/issue9/identicon v0.0.0-20160320065130-d36b54562f4c h1:A/PDn117UYld5m github.com/issue9/identicon v0.0.0-20160320065130-d36b54562f4c/go.mod h1:5mTb/PQNkqmq2x3IxlQZE0aSnTksJg7fg/oWmJ5SKXQ= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90= +github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= github.com/jaytaylor/html2text v0.0.0-20160923191438-8fb95d837f7d h1:ig/iUfDDg06RVW8OMby+GrmW6K2nPO3AFHlEIdvJSd4= github.com/jaytaylor/html2text v0.0.0-20160923191438-8fb95d837f7d/go.mod h1:CVKlgaMiht+LXvHG173ujK6JUhZXKb2u/BQtjPDIvyk= @@ -225,7 +223,6 @@ github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d h1:m+dSK37rFf2fqppZhg15yI2IwC9BtucBiRwSDm9VL8g= github.com/mattn/go-oci8 v0.0.0-20190320171441-14ba190cf52d/go.mod h1:/M9VLO+lUPmxvoOK2PfWRZ8mTtB4q1Hy9lEGijv9Nr8= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= @@ -255,7 +252,6 @@ github.com/pelletier/go-buffruneio v0.2.0 h1:U4t4R6YkofJ5xHm3dJzuRpPZ0mr5MMCoAWo 
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/philhofer/fwd v1.0.0 h1:UbZqGr5Y38ApvM/V/jEljVxwocdweyH+vmYvRPBnbqQ= github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= @@ -328,6 +324,7 @@ github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wK go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190122013713-64072686203f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190418165655-df01cb2cc480 h1:O5YqonU5IWby+w98jVUG9h7zlCWCcH4RHyPVReBmhzk= @@ -376,6 +373,8 @@ golang.org/x/tools v0.0.0-20190618163018-fdf1049a943a/go.mod h1:/rFqwRUd4F7ZHNgw google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0 h1:S0iUepdCWODXRvtE+gcRDd15L+k+k1AiHlMiMjefH24= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc h1:2gGKlE2+asNV9m7xrywl36YYNnBG5ZQ0r/BOOxqPpmk= gopkg.in/alexcesaro/quotedprintable.v3 v3.0.0-20150716171945-2caba252f4dc/go.mod h1:m7x9LTH6d71AHyAX77c9yqWCCa3UKHcVEj9y7hAtKDk= gopkg.in/asn1-ber.v1 v1.0.0-20150924051756-4e86f4367175 h1:nn6Zav2sOQHCFJHEspya8KqxhFwKci30UxHy3HXPTyQ= @@ -419,3 +418,7 @@ mvdan.cc/xurls/v2 v2.0.0 h1:r1zSOSNS/kqtpmATyMMMvaZ4/djsesbYz5kr0+qMRWc= mvdan.cc/xurls/v2 v2.0.0/go.mod h1:2/webFPYOXN9jp/lzuj0zuAVlF+9g4KPFJANH1oJhRU= strk.kbt.io/projects/go/libravatar v0.0.0-20160628055650-5eed7bff870a h1:8q33ShxKXRwQ7JVd1ZnhIU3hZhwwn0Le+4fTeAackuM= strk.kbt.io/projects/go/libravatar v0.0.0-20160628055650-5eed7bff870a/go.mod h1:FJGmPh3vz9jSos1L/F91iAgnC/aejc0wIIrF2ZwJxdY= +xorm.io/builder v0.3.5 h1:EilU39fvWDxjb1cDaELpYhsF+zziRBhew8xk4pngO+A= +xorm.io/builder v0.3.5/go.mod h1:ZFbByS/KxZI1FKRjL05PyJ4YrK2bcxlUaAxdum5aTR8= +xorm.io/core v0.6.3 h1:n1NhVZt1s2oLw1BZfX2ocIJsHyso259uPgg63BGr37M= +xorm.io/core v0.6.3/go.mod h1:8kz/C6arVW/O9vk3PgCiMJO2hIAm1UcuOL3dSPyZ2qo= diff --git a/models/action.go b/models/action.go index ee5d052509337..89283930e9974 100644 --- a/models/action.go +++ b/models/action.go @@ -24,7 +24,7 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "github.com/go-xorm/builder" + "xorm.io/builder" ) // ActionType represents the type of an action. 
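The functional change in this patch is confined to models/issue.go and models/issue_list.go just below; everything else is the vendored xorm upgrade and the github.com/go-xorm to xorm.io import-path move. The previously vendored xorm reported a spurious error whenever rows.Close() was called on an already-exhausted result set, which is why the issue loaders swallowed Close errors with log.Error. With xorm bumped to v0.7.3-0.20190620151208 that complaint is gone, so the workaround is dropped: Close errors are returned to the caller again, and Issues() closes its session before loading attributes. The following is a condensed sketch of the pattern the load* helpers settle on; it assumes the surrounding models package, and the helper name and signature are illustrative, not part of this patch.

```Go
// Condensed sketch of the row-loading pattern adopted in models/issue_list.go.
// Assumes the models package context (Engine, Comment, fmt); the name
// loadCommentsSketch and its signature are illustrative only.
func loadCommentsSketch(e Engine, issueIDs []int64, limit int, comments map[int64][]*Comment) error {
	rows, err := e.Table("comment").In("issue_id", issueIDs[:limit]).Rows(new(Comment))
	if err != nil {
		return err
	}
	for rows.Next() {
		var comment Comment
		if err = rows.Scan(&comment); err != nil {
			// Close errors are no longer spurious with the upgraded xorm,
			// so they are returned to the caller instead of being logged.
			if err1 := rows.Close(); err1 != nil {
				return fmt.Errorf("IssueList.loadComments: Close: %v", err1)
			}
			return err
		}
		comments[comment.IssueID] = append(comments[comment.IssueID], &comment)
	}
	// The final Close is checked and returned as well, rather than written to the log.
	if err1 := rows.Close(); err1 != nil {
		return fmt.Errorf("IssueList.loadComments: Close: %v", err1)
	}
	return nil
}
```

The same return-instead-of-log treatment is applied to loadLabels, loadAssignees, loadPullRequests, loadAttachments, loadComments and loadTotalTrackedTimes, and Issues() in models/issue.go gains a sess.Close() between Find and LoadAttributes.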
diff --git a/models/issue.go b/models/issue.go index 27298b8a86d2f..85544f38ee345 100644 --- a/models/issue.go +++ b/models/issue.go @@ -19,7 +19,7 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" ) @@ -1428,6 +1428,7 @@ func Issues(opts *IssuesOptions) ([]*Issue, error) { if err := sess.Find(&issues); err != nil { return nil, fmt.Errorf("Find: %v", err) } + sess.Close() if err := IssueList(issues).LoadAttributes(); err != nil { return nil, fmt.Errorf("LoadAttributes: %v", err) diff --git a/models/issue_comment.go b/models/issue_comment.go index d75d9d7db1f6e..ec423c19d51c3 100644 --- a/models/issue_comment.go +++ b/models/issue_comment.go @@ -15,7 +15,7 @@ import ( "code.gitea.io/gitea/modules/markup/markdown" "code.gitea.io/gitea/modules/setting" "github.com/Unknwon/com" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" api "code.gitea.io/gitea/modules/structs" diff --git a/models/issue_list.go b/models/issue_list.go index 4ddb32da1311d..e3516b55b948e 100644 --- a/models/issue_list.go +++ b/models/issue_list.go @@ -7,9 +7,7 @@ package models import ( "fmt" - "code.gitea.io/gitea/modules/log" - - "github.com/go-xorm/builder" + "xorm.io/builder" ) // IssueList defines a list of issues @@ -148,19 +146,17 @@ func (issues IssueList) loadLabels(e Engine) error { var labelIssue LabelIssue err = rows.Scan(&labelIssue) if err != nil { - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadLabels: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadLabels: Close: %v", err1) } return err } issueLabels[labelIssue.IssueLabel.IssueID] = append(issueLabels[labelIssue.IssueLabel.IssueID], labelIssue.Label) } - // When there are no rows left and we try to close it, xorm will complain with an error. + // When there are no rows left and we try to close it. // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadLabels: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadLabels: Close: %v", err1) } left -= limit issueIDs = issueIDs[limit:] @@ -241,20 +237,16 @@ func (issues IssueList) loadAssignees(e Engine) error { var assigneeIssue AssigneeIssue err = rows.Scan(&assigneeIssue) if err != nil { - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadAssignees: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadAssignees: Close: %v", err1) } return err } assignees[assigneeIssue.IssueAssignee.IssueID] = append(assignees[assigneeIssue.IssueAssignee.IssueID], assigneeIssue.Assignee) } - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. 
- if err := rows.Close(); err != nil { - log.Error("IssueList.loadAssignees: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadAssignees: Close: %v", err1) } left -= limit issueIDs = issueIDs[limit:] @@ -300,19 +292,15 @@ func (issues IssueList) loadPullRequests(e Engine) error { var pr PullRequest err = rows.Scan(&pr) if err != nil { - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadPullRequests: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadPullRequests: Close: %v", err1) } return err } pullRequestMaps[pr.IssueID] = &pr } - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadPullRequests: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadPullRequests: Close: %v", err1) } left -= limit issuesIDs = issuesIDs[limit:] @@ -349,19 +337,15 @@ func (issues IssueList) loadAttachments(e Engine) (err error) { var attachment Attachment err = rows.Scan(&attachment) if err != nil { - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadAttachments: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadAttachments: Close: %v", err1) } return err } attachments[attachment.IssueID] = append(attachments[attachment.IssueID], &attachment) } - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadAttachments: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadAttachments: Close: %v", err1) } left -= limit issuesIDs = issuesIDs[limit:] @@ -399,19 +383,15 @@ func (issues IssueList) loadComments(e Engine, cond builder.Cond) (err error) { var comment Comment err = rows.Scan(&comment) if err != nil { - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadComments: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadComments: Close: %v", err1) } return err } comments[comment.IssueID] = append(comments[comment.IssueID], &comment) } - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadComments: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadComments: Close: %v", err1) } left -= limit issuesIDs = issuesIDs[limit:] @@ -461,19 +441,15 @@ func (issues IssueList) loadTotalTrackedTimes(e Engine) (err error) { var totalTime totalTimesByIssue err = rows.Scan(&totalTime) if err != nil { - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. 
- if err := rows.Close(); err != nil { - log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadTotalTrackedTimes: Close: %v", err1) } return err } trackedTimes[totalTime.IssueID] = totalTime.Time } - // When there are no rows left and we try to close it, xorm will complain with an error. - // Since that is not relevant for us, we can safely ignore it. - if err := rows.Close(); err != nil { - log.Error("IssueList.loadTotalTrackedTimes: Close: %v", err) + if err1 := rows.Close(); err1 != nil { + return fmt.Errorf("IssueList.loadTotalTrackedTimes: Close: %v", err1) } left -= limit ids = ids[limit:] diff --git a/models/issue_reaction.go b/models/issue_reaction.go index 8f3ee7bfe2087..c3f45b649f217 100644 --- a/models/issue_reaction.go +++ b/models/issue_reaction.go @@ -11,7 +11,7 @@ import ( "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" ) diff --git a/models/issue_tracked_time.go b/models/issue_tracked_time.go index 56ba525776a02..15c51fbfec2e9 100644 --- a/models/issue_tracked_time.go +++ b/models/issue_tracked_time.go @@ -10,7 +10,7 @@ import ( "code.gitea.io/gitea/modules/setting" api "code.gitea.io/gitea/modules/structs" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" ) diff --git a/models/log.go b/models/log.go index 38d6caf07c979..4caea9a8b758e 100644 --- a/models/log.go +++ b/models/log.go @@ -9,7 +9,7 @@ import ( "code.gitea.io/gitea/modules/log" - "github.com/go-xorm/core" + "xorm.io/core" ) // XORMLogBridge a logger bridge from Logger to xorm diff --git a/models/login_source.go b/models/login_source.go index 8eefec4ae5873..c51e1c1fbee69 100644 --- a/models/login_source.go +++ b/models/login_source.go @@ -15,7 +15,7 @@ import ( "strings" "github.com/Unknwon/com" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" "code.gitea.io/gitea/modules/auth/ldap" diff --git a/models/migrations/v31.go b/models/migrations/v31.go index b7ceecfc38051..354d9ed0c18ff 100644 --- a/models/migrations/v31.go +++ b/models/migrations/v31.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" ) diff --git a/models/migrations/v38.go b/models/migrations/v38.go index eb90f9fbff537..d75cf4ea6eec0 100644 --- a/models/migrations/v38.go +++ b/models/migrations/v38.go @@ -9,7 +9,7 @@ import ( "code.gitea.io/gitea/models" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" ) diff --git a/models/migrations/v75.go b/models/migrations/v75.go index 62f92b77db868..01e1b2a82e3a6 100644 --- a/models/migrations/v75.go +++ b/models/migrations/v75.go @@ -5,7 +5,7 @@ package migrations import ( - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" ) diff --git a/models/migrations/v78.go b/models/migrations/v78.go index 310c479d01b74..26474c878ff03 100644 --- a/models/migrations/v78.go +++ b/models/migrations/v78.go @@ -10,7 +10,7 @@ import ( "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/log" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" ) diff --git a/models/migrations/v85.go b/models/migrations/v85.go index d511628b8d544..a1640ed9ce4b6 100644 --- a/models/migrations/v85.go +++ b/models/migrations/v85.go @@ -7,7 +7,7 @@ package migrations import ( "fmt" - "github.com/go-xorm/core" 
+ "xorm.io/core" "github.com/go-xorm/xorm" "code.gitea.io/gitea/models" diff --git a/models/models.go b/models/models.go index 5752a8edd6dcf..858723889624e 100644 --- a/models/models.go +++ b/models/models.go @@ -20,7 +20,7 @@ import ( // Needed for the MySQL driver _ "github.com/go-sql-driver/mysql" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" // Needed for the Postgresql driver diff --git a/models/org.go b/models/org.go index 65002eadff148..e0be575df0eb1 100644 --- a/models/org.go +++ b/models/org.go @@ -15,7 +15,7 @@ import ( "code.gitea.io/gitea/modules/structs" "github.com/Unknwon/com" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" ) diff --git a/models/release.go b/models/release.go index 28a2891013582..036b5f78b9407 100644 --- a/models/release.go +++ b/models/release.go @@ -16,7 +16,7 @@ import ( api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" - "github.com/go-xorm/builder" + "xorm.io/builder" ) // Release represents a release of repository. diff --git a/models/repo.go b/models/repo.go index 215222e27941b..d0d010d77747a 100644 --- a/models/repo.go +++ b/models/repo.go @@ -37,7 +37,7 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" ini "gopkg.in/ini.v1" ) diff --git a/models/repo_list.go b/models/repo_list.go index 5655404f7c71f..7460c4b0ede61 100644 --- a/models/repo_list.go +++ b/models/repo_list.go @@ -11,7 +11,7 @@ import ( "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" - "github.com/go-xorm/builder" + "xorm.io/builder" ) // RepositoryListDefaultPageSize is the default number of repositories diff --git a/models/repo_unit.go b/models/repo_unit.go index 430f5a242ff55..9c5da32fce17f 100644 --- a/models/repo_unit.go +++ b/models/repo_unit.go @@ -10,7 +10,7 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" ) diff --git a/models/review.go b/models/review.go index 5f856fbd8973b..17241f024f648 100644 --- a/models/review.go +++ b/models/review.go @@ -11,8 +11,8 @@ import ( api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" "github.com/go-xorm/xorm" ) diff --git a/models/ssh_key.go b/models/ssh_key.go index 15a10826d8ea1..87c47b6db93bd 100644 --- a/models/ssh_key.go +++ b/models/ssh_key.go @@ -25,7 +25,7 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "github.com/go-xorm/builder" + "xorm.io/builder" "github.com/go-xorm/xorm" "golang.org/x/crypto/ssh" ) diff --git a/models/topic.go b/models/topic.go index da1815be76e69..666196ba8e786 100644 --- a/models/topic.go +++ b/models/topic.go @@ -11,7 +11,7 @@ import ( "code.gitea.io/gitea/modules/util" - "github.com/go-xorm/builder" + "xorm.io/builder" ) func init() { diff --git a/models/unit_tests.go b/models/unit_tests.go index 19fc95ea65771..01ad782f3bbf3 100644 --- a/models/unit_tests.go +++ b/models/unit_tests.go @@ -18,7 +18,7 @@ import ( "code.gitea.io/gitea/modules/setting" "github.com/Unknwon/com" - "github.com/go-xorm/core" + "xorm.io/core" "github.com/go-xorm/xorm" "github.com/stretchr/testify/assert" "gopkg.in/testfixtures.v2" diff --git a/models/user.go 
b/models/user.go index 4dc9aec6504f0..9723af125a1d5 100644 --- a/models/user.go +++ b/models/user.go @@ -32,8 +32,8 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" "github.com/go-xorm/xorm" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/ssh" diff --git a/routers/admin/auths.go b/routers/admin/auths.go index 40b7df108d9b5..81751f89556cf 100644 --- a/routers/admin/auths.go +++ b/routers/admin/auths.go @@ -17,7 +17,7 @@ import ( "code.gitea.io/gitea/modules/setting" "github.com/Unknwon/com" - "github.com/go-xorm/core" + "xorm.io/core" ) const ( diff --git a/vendor/github.com/go-xorm/builder/go.mod b/vendor/github.com/go-xorm/builder/go.mod deleted file mode 100644 index ef1a659ad116b..0000000000000 --- a/vendor/github.com/go-xorm/builder/go.mod +++ /dev/null @@ -1 +0,0 @@ -module "github.com/go-xorm/builder" diff --git a/vendor/github.com/go-xorm/core/circle.yml b/vendor/github.com/go-xorm/core/circle.yml deleted file mode 100644 index e6a05be2727fe..0000000000000 --- a/vendor/github.com/go-xorm/core/circle.yml +++ /dev/null @@ -1,15 +0,0 @@ -dependencies: - override: - # './...' is a relative pattern which means all subdirectories - - go get -t -d -v ./... - - go build -v - -database: - override: - - mysql -u root -e "CREATE DATABASE core_test DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" - -test: - override: - # './...' is a relative pattern which means all subdirectories - - go test -v -race - - go test -v -race --dbtype=sqlite3 diff --git a/vendor/github.com/go-xorm/core/db.go b/vendor/github.com/go-xorm/core/db.go deleted file mode 100644 index 9969fa4313493..0000000000000 --- a/vendor/github.com/go-xorm/core/db.go +++ /dev/null @@ -1,401 +0,0 @@ -package core - -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "reflect" - "regexp" - "sync" -) - -var ( - DefaultCacheSize = 200 -) - -func MapToSlice(query string, mp interface{}) (string, []interface{}, error) { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return "", []interface{}{}, ErrNoMapPointer - } - - args := make([]interface{}, 0, len(vv.Elem().MapKeys())) - var err error - query = re.ReplaceAllStringFunc(query, func(src string) string { - v := vv.Elem().MapIndex(reflect.ValueOf(src[1:])) - if !v.IsValid() { - err = fmt.Errorf("map key %s is missing", src[1:]) - } else { - args = append(args, v.Interface()) - } - return "?" - }) - - return query, args, err -} - -func StructToSlice(query string, st interface{}) (string, []interface{}, error) { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return "", []interface{}{}, ErrNoStructPointer - } - - args := make([]interface{}, 0) - var err error - query = re.ReplaceAllStringFunc(query, func(src string) string { - fv := vv.Elem().FieldByName(src[1:]).Interface() - if v, ok := fv.(driver.Valuer); ok { - var value driver.Value - value, err = v.Value() - if err != nil { - return "?" - } - args = append(args, value) - } else { - args = append(args, fv) - } - return "?" 
- }) - if err != nil { - return "", []interface{}{}, err - } - return query, args, nil -} - -type cacheStruct struct { - value reflect.Value - idx int -} - -type DB struct { - *sql.DB - Mapper IMapper - reflectCache map[reflect.Type]*cacheStruct - reflectCacheMutex sync.RWMutex -} - -func Open(driverName, dataSourceName string) (*DB, error) { - db, err := sql.Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - return &DB{ - DB: db, - Mapper: NewCacheMapper(&SnakeMapper{}), - reflectCache: make(map[reflect.Type]*cacheStruct), - }, nil -} - -func FromDB(db *sql.DB) *DB { - return &DB{ - DB: db, - Mapper: NewCacheMapper(&SnakeMapper{}), - reflectCache: make(map[reflect.Type]*cacheStruct), - } -} - -func (db *DB) reflectNew(typ reflect.Type) reflect.Value { - db.reflectCacheMutex.Lock() - defer db.reflectCacheMutex.Unlock() - cs, ok := db.reflectCache[typ] - if !ok || cs.idx+1 > DefaultCacheSize-1 { - cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0} - db.reflectCache[typ] = cs - } else { - cs.idx = cs.idx + 1 - } - return cs.value.Index(cs.idx).Addr() -} - -func (db *DB) Query(query string, args ...interface{}) (*Rows, error) { - rows, err := db.DB.Query(query, args...) - if err != nil { - if rows != nil { - rows.Close() - } - return nil, err - } - return &Rows{rows, db}, nil -} - -func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return db.Query(query, args...) -} - -func (db *DB) QueryStruct(query string, st interface{}) (*Rows, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return db.Query(query, args...) -} - -func (db *DB) QueryRow(query string, args ...interface{}) *Row { - rows, err := db.Query(query, args...) - if err != nil { - return &Row{nil, err} - } - return &Row{rows, nil} -} - -func (db *DB) QueryRowMap(query string, mp interface{}) *Row { - query, args, err := MapToSlice(query, mp) - if err != nil { - return &Row{nil, err} - } - return db.QueryRow(query, args...) -} - -func (db *DB) QueryRowStruct(query string, st interface{}) *Row { - query, args, err := StructToSlice(query, st) - if err != nil { - return &Row{nil, err} - } - return db.QueryRow(query, args...) -} - -type Stmt struct { - *sql.Stmt - db *DB - names map[string]int -} - -func (db *DB) Prepare(query string) (*Stmt, error) { - names := make(map[string]int) - var i int - query = re.ReplaceAllStringFunc(query, func(src string) string { - names[src[1:]] = i - i += 1 - return "?" - }) - - stmt, err := db.DB.Prepare(query) - if err != nil { - return nil, err - } - return &Stmt{stmt, db, names}, nil -} - -func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() - } - return s.Stmt.Exec(args...) -} - -func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().FieldByName(k).Interface() - } - return s.Stmt.Exec(args...) 
-} - -func (s *Stmt) Query(args ...interface{}) (*Rows, error) { - rows, err := s.Stmt.Query(args...) - if err != nil { - return nil, err - } - return &Rows{rows, s.db}, nil -} - -func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() - } - - return s.Query(args...) -} - -func (s *Stmt) QueryStruct(st interface{}) (*Rows, error) { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return nil, errors.New("mp should be a map's pointer") - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().FieldByName(k).Interface() - } - - return s.Query(args...) -} - -func (s *Stmt) QueryRow(args ...interface{}) *Row { - rows, err := s.Query(args...) - return &Row{rows, err} -} - -func (s *Stmt) QueryRowMap(mp interface{}) *Row { - vv := reflect.ValueOf(mp) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { - return &Row{nil, errors.New("mp should be a map's pointer")} - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() - } - - return s.QueryRow(args...) -} - -func (s *Stmt) QueryRowStruct(st interface{}) *Row { - vv := reflect.ValueOf(st) - if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { - return &Row{nil, errors.New("st should be a struct's pointer")} - } - - args := make([]interface{}, len(s.names)) - for k, i := range s.names { - args[i] = vv.Elem().FieldByName(k).Interface() - } - - return s.QueryRow(args...) -} - -var ( - re = regexp.MustCompile(`[?](\w+)`) -) - -// insert into (name) values (?) -// insert into (name) values (?name) -func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return db.DB.Exec(query, args...) -} - -func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return db.DB.Exec(query, args...) -} - -type EmptyScanner struct { -} - -func (EmptyScanner) Scan(src interface{}) error { - return nil -} - -type Tx struct { - *sql.Tx - db *DB -} - -func (db *DB) Begin() (*Tx, error) { - tx, err := db.DB.Begin() - if err != nil { - return nil, err - } - return &Tx{tx, db}, nil -} - -func (tx *Tx) Prepare(query string) (*Stmt, error) { - names := make(map[string]int) - var i int - query = re.ReplaceAllStringFunc(query, func(src string) string { - names[src[1:]] = i - i += 1 - return "?" - }) - - stmt, err := tx.Tx.Prepare(query) - if err != nil { - return nil, err - } - return &Stmt{stmt, tx.db, names}, nil -} - -func (tx *Tx) Stmt(stmt *Stmt) *Stmt { - // TODO: - return stmt -} - -func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return tx.Tx.Exec(query, args...) -} - -func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return tx.Tx.Exec(query, args...) 
-} - -func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) { - rows, err := tx.Tx.Query(query, args...) - if err != nil { - return nil, err - } - return &Rows{rows, tx.db}, nil -} - -func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) { - query, args, err := MapToSlice(query, mp) - if err != nil { - return nil, err - } - return tx.Query(query, args...) -} - -func (tx *Tx) QueryStruct(query string, st interface{}) (*Rows, error) { - query, args, err := StructToSlice(query, st) - if err != nil { - return nil, err - } - return tx.Query(query, args...) -} - -func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { - rows, err := tx.Query(query, args...) - return &Row{rows, err} -} - -func (tx *Tx) QueryRowMap(query string, mp interface{}) *Row { - query, args, err := MapToSlice(query, mp) - if err != nil { - return &Row{nil, err} - } - return tx.QueryRow(query, args...) -} - -func (tx *Tx) QueryRowStruct(query string, st interface{}) *Row { - query, args, err := StructToSlice(query, st) - if err != nil { - return &Row{nil, err} - } - return tx.QueryRow(query, args...) -} diff --git a/vendor/github.com/go-xorm/core/go.mod b/vendor/github.com/go-xorm/core/go.mod deleted file mode 100644 index 70c86bcbc8338..0000000000000 --- a/vendor/github.com/go-xorm/core/go.mod +++ /dev/null @@ -1 +0,0 @@ -module "github.com/go-xorm/core" diff --git a/vendor/github.com/go-xorm/xorm/.drone.yml b/vendor/github.com/go-xorm/xorm/.drone.yml index 0a79ed0216093..df9d405bc2926 100644 --- a/vendor/github.com/go-xorm/xorm/.drone.yml +++ b/vendor/github.com/go-xorm/xorm/.drone.yml @@ -59,8 +59,8 @@ pipeline: image: golang:${GO_VERSION} commands: - go get -t -d -v ./... - - go get -u github.com/go-xorm/core - - go get -u github.com/go-xorm/builder + - go get -u xorm.io/core + - go get -u xorm.io/builder - go build -v when: event: [ push, pull_request ] diff --git a/vendor/github.com/go-xorm/xorm/README.md b/vendor/github.com/go-xorm/xorm/README.md index 6a57606e7fb01..2b839d520f809 100644 --- a/vendor/github.com/go-xorm/xorm/README.md +++ b/vendor/github.com/go-xorm/xorm/README.md @@ -28,7 +28,7 @@ Xorm is a simple and powerful ORM for Go. * Optimistic Locking support -* SQL Builder support via [github.com/go-xorm/builder](https://github.com/go-xorm/builder) +* SQL Builder support via [xorm.io/builder](https://xorm.io/builder) * Automatical Read/Write seperatelly @@ -151,20 +151,20 @@ has, err := engine.Where("name = ?", name).Desc("id").Get(&user) // SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 var name string -has, err := engine.Where("id = ?", id).Cols("name").Get(&name) +has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) // SELECT name FROM user WHERE id = ? var id int64 -has, err := engine.Where("name = ?", name).Cols("id").Get(&id) +has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) has, err := engine.SQL("select id from user").Get(&id) // SELECT id FROM user WHERE name = ? var valuesMap = make(map[string]string) -has, err := engine.Where("id = ?", id).Get(&valuesMap) +has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) // SELECT * FROM user WHERE id = ? var valuesSlice = make([]interface{}, len(cols)) -has, err := engine.Where("id = ?", id).Cols(cols...).Get(&valuesSlice) +has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) // SELECT col1, col2, col3 FROM user WHERE id = ? 
``` @@ -363,7 +363,7 @@ return session.Commit() * Or you can use `Transaction` to replace above codes. ```Go -res, err := engine.Transaction(func(sess *xorm.Session) (interface{}, error) { +res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { user1 := Userinfo{Username: "xiaoxiao", Departname: "dev", Alias: "lunny", Created: time.Now()} if _, err := session.Insert(&user1); err != nil { return nil, err @@ -493,4 +493,4 @@ Support this project by becoming a sponsor. Your logo will show up here with a l ## LICENSE -BSD License [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) \ No newline at end of file +BSD License [http://creativecommons.org/licenses/BSD/](http://creativecommons.org/licenses/BSD/) diff --git a/vendor/github.com/go-xorm/xorm/README_CN.md b/vendor/github.com/go-xorm/xorm/README_CN.md index e2ed95b62c4fd..0cec6ed5c6f6d 100644 --- a/vendor/github.com/go-xorm/xorm/README_CN.md +++ b/vendor/github.com/go-xorm/xorm/README_CN.md @@ -153,20 +153,20 @@ has, err := engine.Where("name = ?", name).Desc("id").Get(&user) // SELECT * FROM user WHERE name = ? ORDER BY id DESC LIMIT 1 var name string -has, err := engine.Where("id = ?", id).Cols("name").Get(&name) +has, err := engine.Table(&user).Where("id = ?", id).Cols("name").Get(&name) // SELECT name FROM user WHERE id = ? var id int64 -has, err := engine.Where("name = ?", name).Cols("id").Get(&id) +has, err := engine.Table(&user).Where("name = ?", name).Cols("id").Get(&id) has, err := engine.SQL("select id from user").Get(&id) // SELECT id FROM user WHERE name = ? var valuesMap = make(map[string]string) -has, err := engine.Where("id = ?", id).Get(&valuesMap) +has, err := engine.Table(&user).Where("id = ?", id).Get(&valuesMap) // SELECT * FROM user WHERE id = ? var valuesSlice = make([]interface{}, len(cols)) -has, err := engine.Where("id = ?", id).Cols(cols...).Get(&valuesSlice) +has, err := engine.Table(&user).Where("id = ?", id).Cols(cols...).Get(&valuesSlice) // SELECT col1, col2, col3 FROM user WHERE id = ? ``` @@ -362,7 +362,7 @@ if _, err := session.Exec("delete from userinfo where username = ?", user2.Usern return session.Commit() ``` -* 事物的简写方法 +* 事务的简写方法 ```Go res, err := engine.Transaction(func(session *xorm.Session) (interface{}, error) { diff --git a/vendor/github.com/go-xorm/xorm/cache_lru.go b/vendor/github.com/go-xorm/xorm/cache_lru.go index c9672cebe4d5f..ab948bd28ec09 100644 --- a/vendor/github.com/go-xorm/xorm/cache_lru.go +++ b/vendor/github.com/go-xorm/xorm/cache_lru.go @@ -10,7 +10,7 @@ import ( "sync" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) // LRUCacher implments cache object facilities diff --git a/vendor/github.com/go-xorm/xorm/cache_memory_store.go b/vendor/github.com/go-xorm/xorm/cache_memory_store.go index 36853b19e361c..0c483f45839e6 100644 --- a/vendor/github.com/go-xorm/xorm/cache_memory_store.go +++ b/vendor/github.com/go-xorm/xorm/cache_memory_store.go @@ -7,7 +7,7 @@ package xorm import ( "sync" - "github.com/go-xorm/core" + "xorm.io/core" ) var _ core.CacheStore = NewMemoryStore() diff --git a/vendor/github.com/go-xorm/xorm/circle.yml b/vendor/github.com/go-xorm/xorm/circle.yml deleted file mode 100644 index 8fde316921023..0000000000000 --- a/vendor/github.com/go-xorm/xorm/circle.yml +++ /dev/null @@ -1,41 +0,0 @@ -dependencies: - override: - # './...' is a relative pattern which means all subdirectories - - go get -t -d -v ./... 
- - go get -t -d -v github.com/go-xorm/tests - - go get -u github.com/go-xorm/core - - go get -u github.com/go-xorm/builder - - go build -v - -database: - override: - - mysql -u root -e "CREATE DATABASE xorm_test DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" - - mysql -u root -e "CREATE DATABASE xorm_test1 DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" - - mysql -u root -e "CREATE DATABASE xorm_test2 DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" - - mysql -u root -e "CREATE DATABASE xorm_test3 DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci" - - createdb -p 5432 -e -U postgres xorm_test - - createdb -p 5432 -e -U postgres xorm_test1 - - createdb -p 5432 -e -U postgres xorm_test2 - - createdb -p 5432 -e -U postgres xorm_test3 - - psql xorm_test postgres -c "create schema xorm" - -test: - override: - # './...' is a relative pattern which means all subdirectories - - go get -u github.com/wadey/gocovmerge - - go test -v -race -db="sqlite3" -conn_str="./test.db" -coverprofile=coverage1-1.txt -covermode=atomic - - go test -v -race -db="sqlite3" -conn_str="./test.db" -cache=true -coverprofile=coverage1-2.txt -covermode=atomic - - go test -v -race -db="mysql" -conn_str="root:@/xorm_test" -coverprofile=coverage2-1.txt -covermode=atomic - - go test -v -race -db="mysql" -conn_str="root:@/xorm_test" -cache=true -coverprofile=coverage2-2.txt -covermode=atomic - - go test -v -race -db="mymysql" -conn_str="xorm_test/root/" -coverprofile=coverage3-1.txt -covermode=atomic - - go test -v -race -db="mymysql" -conn_str="xorm_test/root/" -cache=true -coverprofile=coverage3-2.txt -covermode=atomic - - go test -v -race -db="postgres" -conn_str="dbname=xorm_test sslmode=disable" -coverprofile=coverage4-1.txt -covermode=atomic - - go test -v -race -db="postgres" -conn_str="dbname=xorm_test sslmode=disable" -cache=true -coverprofile=coverage4-2.txt -covermode=atomic - - go test -v -race -db="postgres" -conn_str="dbname=xorm_test sslmode=disable" -schema=xorm -coverprofile=coverage5-1.txt -covermode=atomic - - go test -v -race -db="postgres" -conn_str="dbname=xorm_test sslmode=disable" -schema=xorm -cache=true -coverprofile=coverage5-2.txt -covermode=atomic - - gocovmerge coverage1-1.txt coverage1-2.txt coverage2-1.txt coverage2-2.txt coverage3-1.txt coverage3-2.txt coverage4-1.txt coverage4-2.txt coverage5-1.txt coverage5-2.txt > coverage.txt - - cd /home/ubuntu/.go_workspace/src/github.com/go-xorm/tests && ./sqlite3.sh - - cd /home/ubuntu/.go_workspace/src/github.com/go-xorm/tests && ./mysql.sh - - cd /home/ubuntu/.go_workspace/src/github.com/go-xorm/tests && ./postgres.sh - post: - - bash <(curl -s https://codecov.io/bash) \ No newline at end of file diff --git a/vendor/github.com/go-xorm/xorm/dialect_mssql.go b/vendor/github.com/go-xorm/xorm/dialect_mssql.go index fb1247094c7e9..3330212c12ca9 100644 --- a/vendor/github.com/go-xorm/xorm/dialect_mssql.go +++ b/vendor/github.com/go-xorm/xorm/dialect_mssql.go @@ -7,10 +7,11 @@ package xorm import ( "errors" "fmt" + "net/url" "strconv" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) var ( @@ -544,14 +545,23 @@ type odbcDriver struct { } func (p *odbcDriver) Parse(driverName, dataSourceName string) (*core.Uri, error) { - kv := strings.Split(dataSourceName, ";") var dbName string - for _, c := range kv { - vv := strings.Split(strings.TrimSpace(c), "=") - if len(vv) == 2 { - switch strings.ToLower(vv[0]) { - case "database": - dbName = vv[1] + + if strings.HasPrefix(dataSourceName, 
"sqlserver://") { + u, err := url.Parse(dataSourceName) + if err != nil { + return nil, err + } + dbName = u.Query().Get("database") + } else { + kv := strings.Split(dataSourceName, ";") + for _, c := range kv { + vv := strings.Split(strings.TrimSpace(c), "=") + if len(vv) == 2 { + switch strings.ToLower(vv[0]) { + case "database": + dbName = vv[1] + } } } } diff --git a/vendor/github.com/go-xorm/xorm/dialect_mysql.go b/vendor/github.com/go-xorm/xorm/dialect_mysql.go index 9f5ae3b2e50d1..2628042ab334c 100644 --- a/vendor/github.com/go-xorm/xorm/dialect_mysql.go +++ b/vendor/github.com/go-xorm/xorm/dialect_mysql.go @@ -13,7 +13,7 @@ import ( "strings" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) var ( @@ -393,6 +393,9 @@ func (db *mysql) GetColumns(tableName string) ([]string, map[string]*core.Column if colType == "FLOAT UNSIGNED" { colType = "FLOAT" } + if colType == "DOUBLE UNSIGNED" { + colType = "DOUBLE" + } col.Length = len1 col.Length2 = len2 if _, ok := core.SqlTypes[colType]; ok { diff --git a/vendor/github.com/go-xorm/xorm/dialect_oracle.go b/vendor/github.com/go-xorm/xorm/dialect_oracle.go index ac0081b38f7ec..b66145bcbeaa0 100644 --- a/vendor/github.com/go-xorm/xorm/dialect_oracle.go +++ b/vendor/github.com/go-xorm/xorm/dialect_oracle.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) var ( diff --git a/vendor/github.com/go-xorm/xorm/dialect_postgres.go b/vendor/github.com/go-xorm/xorm/dialect_postgres.go index 738c6a158182d..662f6401c1b6a 100644 --- a/vendor/github.com/go-xorm/xorm/dialect_postgres.go +++ b/vendor/github.com/go-xorm/xorm/dialect_postgres.go @@ -11,7 +11,7 @@ import ( "strconv" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) // from http://www.postgresql.org/docs/current/static/sql-keywords-appendix.html @@ -1093,6 +1093,19 @@ func (db *postgres) GetTables() ([]*core.Table, error) { return tables, nil } + +func getIndexColName(indexdef string) []string { + var colNames []string + + cs := strings.Split(indexdef, "(") + for _, v := range strings.Split(strings.Split(cs[1], ")")[0], ",") { + colNames = append(colNames, strings.Split(strings.TrimLeft(v, " "), " ")[0]) + } + + return colNames +} + + func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) { args := []interface{}{tableName} s := fmt.Sprintf("SELECT indexname, indexdef FROM pg_indexes WHERE tablename=$1") @@ -1126,8 +1139,7 @@ func (db *postgres) GetIndexes(tableName string) (map[string]*core.Index, error) } else { indexType = core.IndexType } - cs := strings.Split(indexdef, "(") - colNames = strings.Split(cs[1][0:len(cs[1])-1], ",") + colNames = getIndexColName(indexdef) var isRegular bool if strings.HasPrefix(indexName, "IDX_"+tableName) || strings.HasPrefix(indexName, "UQE_"+tableName) { newIdxName := indexName[5+len(tableName):] diff --git a/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go b/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go index e129481466e2f..e30a7751bfd44 100644 --- a/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go +++ b/vendor/github.com/go-xorm/xorm/dialect_sqlite3.go @@ -11,7 +11,7 @@ import ( "regexp" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) var ( diff --git a/vendor/github.com/go-xorm/xorm/engine.go b/vendor/github.com/go-xorm/xorm/engine.go index c1bf06e15cd3a..962df125bbd97 100644 --- a/vendor/github.com/go-xorm/xorm/engine.go +++ 
b/vendor/github.com/go-xorm/xorm/engine.go @@ -7,6 +7,7 @@ package xorm import ( "bufio" "bytes" + "context" "database/sql" "encoding/gob" "errors" @@ -19,8 +20,8 @@ import ( "sync" "time" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) // Engine is the major struct of xorm, it means a database manager. @@ -52,6 +53,8 @@ type Engine struct { cachers map[string]core.Cacher cacherLock sync.RWMutex + + defaultContext context.Context } func (engine *Engine) setCacher(tableName string, cacher core.Cacher) { @@ -122,6 +125,7 @@ func (engine *Engine) Logger() core.ILogger { // SetLogger set the new logger func (engine *Engine) SetLogger(logger core.ILogger) { engine.logger = logger + engine.showSQL = logger.IsShowSQL() engine.dialect.SetLogger(logger) } @@ -1351,31 +1355,31 @@ func (engine *Engine) DropIndexes(bean interface{}) error { } // Exec raw sql -func (engine *Engine) Exec(sqlorArgs ...interface{}) (sql.Result, error) { +func (engine *Engine) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { session := engine.NewSession() defer session.Close() - return session.Exec(sqlorArgs...) + return session.Exec(sqlOrArgs...) } // Query a raw sql and return records as []map[string][]byte -func (engine *Engine) Query(sqlorArgs ...interface{}) (resultsSlice []map[string][]byte, err error) { +func (engine *Engine) Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) { session := engine.NewSession() defer session.Close() - return session.Query(sqlorArgs...) + return session.Query(sqlOrArgs...) } // QueryString runs a raw sql and return records as []map[string]string -func (engine *Engine) QueryString(sqlorArgs ...interface{}) ([]map[string]string, error) { +func (engine *Engine) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { session := engine.NewSession() defer session.Close() - return session.QueryString(sqlorArgs...) + return session.QueryString(sqlOrArgs...) } // QueryInterface runs a raw sql and return records as []map[string]interface{} -func (engine *Engine) QueryInterface(sqlorArgs ...interface{}) ([]map[string]interface{}, error) { +func (engine *Engine) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { session := engine.NewSession() defer session.Close() - return session.QueryInterface(sqlorArgs...) + return session.QueryInterface(sqlOrArgs...) 
} // Insert one or more records diff --git a/vendor/github.com/go-xorm/xorm/engine_cond.go b/vendor/github.com/go-xorm/xorm/engine_cond.go index 4dde8662e13ba..702ac8043402e 100644 --- a/vendor/github.com/go-xorm/xorm/engine_cond.go +++ b/vendor/github.com/go-xorm/xorm/engine_cond.go @@ -6,14 +6,13 @@ package xorm import ( "database/sql/driver" - "encoding/json" "fmt" "reflect" "strings" "time" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) func (engine *Engine) buildConds(table *core.Table, bean interface{}, @@ -147,7 +146,7 @@ func (engine *Engine) buildConds(table *core.Table, bean interface{}, } else { if col.SQLType.IsJson() { if col.SQLType.IsText() { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { engine.logger.Error(err) continue @@ -156,7 +155,7 @@ func (engine *Engine) buildConds(table *core.Table, bean interface{}, } else if col.SQLType.IsBlob() { var bytes []byte var err error - bytes, err = json.Marshal(fieldValue.Interface()) + bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { engine.logger.Error(err) continue @@ -195,7 +194,7 @@ func (engine *Engine) buildConds(table *core.Table, bean interface{}, } if col.SQLType.IsText() { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { engine.logger.Error(err) continue @@ -212,7 +211,7 @@ func (engine *Engine) buildConds(table *core.Table, bean interface{}, continue } } else { - bytes, err = json.Marshal(fieldValue.Interface()) + bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { engine.logger.Error(err) continue diff --git a/vendor/github.com/go-xorm/xorm/engine_context.go b/vendor/github.com/go-xorm/xorm/engine_context.go new file mode 100644 index 0000000000000..c6cbb76c1d989 --- /dev/null +++ b/vendor/github.com/go-xorm/xorm/engine_context.go @@ -0,0 +1,28 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package xorm + +import "context" + +// Context creates a session with the context +func (engine *Engine) Context(ctx context.Context) *Session { + session := engine.NewSession() + session.isAutoClose = true + return session.Context(ctx) +} + +// SetDefaultContext set the default context +func (engine *Engine) SetDefaultContext(ctx context.Context) { + engine.defaultContext = ctx +} + +// PingContext tests if database is alive +func (engine *Engine) PingContext(ctx context.Context) error { + session := engine.NewSession() + defer session.Close() + return session.PingContext(ctx) +} diff --git a/vendor/github.com/go-xorm/xorm/engine_group.go b/vendor/github.com/go-xorm/xorm/engine_group.go index 5eee3e6183312..42d49eca93eac 100644 --- a/vendor/github.com/go-xorm/xorm/engine_group.go +++ b/vendor/github.com/go-xorm/xorm/engine_group.go @@ -5,9 +5,10 @@ package xorm import ( + "context" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) // EngineGroup defines an engine group @@ -74,6 +75,20 @@ func (eg *EngineGroup) Close() error { return nil } +// Context returned a group session +func (eg *EngineGroup) Context(ctx context.Context) *Session { + sess := eg.NewSession() + sess.isAutoClose = true + return sess.Context(ctx) +} + +// NewSession returned a group session +func (eg *EngineGroup) NewSession() *Session { + sess := eg.Engine.NewSession() + sess.sessionType = groupSession + return sess +} + // Master returns the master engine func (eg *EngineGroup) Master() *Engine { return eg.Engine diff --git a/vendor/github.com/go-xorm/xorm/engine_table.go b/vendor/github.com/go-xorm/xorm/engine_table.go index 94871a4bce5b1..eb5aa850af64e 100644 --- a/vendor/github.com/go-xorm/xorm/engine_table.go +++ b/vendor/github.com/go-xorm/xorm/engine_table.go @@ -9,10 +9,10 @@ import ( "reflect" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) -// TableNameWithSchema will automatically add schema prefix on table name +// tbNameWithSchema will automatically add schema prefix on table name func (engine *Engine) tbNameWithSchema(v string) string { // Add schema name as prefix of table name. // Only for postgres database. 
diff --git a/vendor/github.com/go-xorm/xorm/error.go b/vendor/github.com/go-xorm/xorm/error.go index a223fc4a86158..a67527acdab12 100644 --- a/vendor/github.com/go-xorm/xorm/error.go +++ b/vendor/github.com/go-xorm/xorm/error.go @@ -26,6 +26,8 @@ var ( ErrNotImplemented = errors.New("Not implemented") // ErrConditionType condition type unsupported ErrConditionType = errors.New("Unsupported condition type") + // ErrUnSupportedSQLType parameter of SQL is not supported + ErrUnSupportedSQLType = errors.New("unsupported sql type") ) // ErrFieldIsNotExist columns does not exist diff --git a/vendor/github.com/go-xorm/xorm/go.mod b/vendor/github.com/go-xorm/xorm/go.mod index 1856169558a03..9a30b4b48aa88 100644 --- a/vendor/github.com/go-xorm/xorm/go.mod +++ b/vendor/github.com/go-xorm/xorm/go.mod @@ -1,24 +1,24 @@ module github.com/go-xorm/xorm require ( + cloud.google.com/go v0.34.0 // indirect github.com/cockroachdb/apd v1.1.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f - github.com/go-sql-driver/mysql v1.4.0 - github.com/go-xorm/builder v0.3.2 - github.com/go-xorm/core v0.6.0 - github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a // indirect + github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 + github.com/go-sql-driver/mysql v1.4.1 + github.com/google/go-cmp v0.2.0 // indirect github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 // indirect - github.com/jackc/pgx v3.2.0+incompatible + github.com/jackc/pgx v3.3.0+incompatible github.com/kr/pretty v0.1.0 // indirect github.com/lib/pq v1.0.0 - github.com/mattn/go-sqlite3 v1.9.0 - github.com/pkg/errors v0.8.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/mattn/go-sqlite3 v1.10.0 + github.com/pkg/errors v0.8.1 // indirect github.com/satori/go.uuid v1.2.0 // indirect github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 // indirect - github.com/stretchr/testify v1.2.2 + github.com/stretchr/testify v1.3.0 github.com/ziutek/mymysql v1.5.4 + golang.org/x/crypto v0.0.0-20190122013713-64072686203f // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/stretchr/testify.v1 v1.2.2 + xorm.io/builder v0.3.5 + xorm.io/core v0.6.3 ) diff --git a/vendor/github.com/go-xorm/xorm/go.sum b/vendor/github.com/go-xorm/xorm/go.sum index dbf757d1d3541..307d46d9ad96c 100644 --- a/vendor/github.com/go-xorm/xorm/go.sum +++ b/vendor/github.com/go-xorm/xorm/go.sum @@ -1,21 +1,24 @@ +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f h1:WH0w/R4Yoey+04HhFxqZ6VX6I0d7RMyw5aXQ9UTvQPs= 
-github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= -github.com/go-sql-driver/mysql v1.4.0 h1:7LxgVwFb2hIQtMm87NdgAVfXjnt4OePseqT1tKx+opk= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-xorm/builder v0.3.2 h1:pSsZQRRzJNapKEAEhigw3xLmiLPeAYv5GFlpYZ8+a5I= -github.com/go-xorm/builder v0.3.2/go.mod h1:v8mE3MFBgtL+RGFNfUnAMUqqfk/Y4W5KuwCFQIEpQLk= -github.com/go-xorm/core v0.6.0 h1:tp6hX+ku4OD9khFZS8VGBDRY3kfVCtelPfmkgCyHxL0= -github.com/go-xorm/core v0.6.0/go.mod h1:d8FJ9Br8OGyQl12MCclmYBuBqqxsyeedpXciV5Myih8= +github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 h1:b5OnbZD49x9g+/FcYbs/vukEt8C/jUbGhCJ3uduQmu8= +github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y= github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733 h1:vr3AYkKovP8uR8AvSGGUK1IDqRa5lAAvEkZG1LKaCRc= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= -github.com/jackc/pgx v3.2.0+incompatible h1:0Vihzu20St42/UDsvZGdNE6jak7oi/UOeMzwMPHkgFY= -github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= +github.com/jackc/pgx v3.3.0+incompatible h1:Wa90/+qsITBAPkAZjiByeIGHFcj3Ztu+VzrrIpHjL90= +github.com/jackc/pgx v3.3.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -23,21 +26,32 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/lib/pq v1.0.0 h1:X5PMW56eZitiTeO7tKzZxFCSpbFZJtkMMooicw2us9A= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= -github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= -github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod 
h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +golang.org/x/crypto v0.0.0-20190122013713-64072686203f h1:u1CmMhe3a44hy8VIgpInORnI01UVaUYheqR7x9BxT3c= +golang.org/x/crypto v0.0.0-20190122013713-64072686203f/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/stretchr/testify.v1 v1.2.2 h1:yhQC6Uy5CqibAIlk1wlusa/MJ3iAN49/BsR/dCCKz3M= -gopkg.in/stretchr/testify.v1 v1.2.2/go.mod h1:QI5V/q6UbPmuhtm10CaFZxED9NreB8PnFYN9JcR6TxU= +xorm.io/builder v0.3.5 h1:EilU39fvWDxjb1cDaELpYhsF+zziRBhew8xk4pngO+A= +xorm.io/builder v0.3.5/go.mod h1:ZFbByS/KxZI1FKRjL05PyJ4YrK2bcxlUaAxdum5aTR8= +xorm.io/core v0.6.2 h1:EJLcSxf336POJr670wKB55Mah9f93xzvGYzNRgnT8/Y= +xorm.io/core v0.6.2/go.mod h1:bwPIfLdm/FzWgVUH8WPVlr+uJhscvNGFcaZKXsI3n2c= +xorm.io/core v0.6.3 h1:n1NhVZt1s2oLw1BZfX2ocIJsHyso259uPgg63BGr37M= +xorm.io/core v0.6.3/go.mod h1:8kz/C6arVW/O9vk3PgCiMJO2hIAm1UcuOL3dSPyZ2qo= diff --git a/vendor/github.com/go-xorm/xorm/helpers.go b/vendor/github.com/go-xorm/xorm/helpers.go index f1705782e3d07..db8fc581f358b 100644 --- a/vendor/github.com/go-xorm/xorm/helpers.go +++ b/vendor/github.com/go-xorm/xorm/helpers.go @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) // str2PK convert string value to primary key value according to tp diff --git a/vendor/github.com/go-xorm/xorm/interface.go b/vendor/github.com/go-xorm/xorm/interface.go index 33d2078e44e74..0928f66a9ad04 100644 --- a/vendor/github.com/go-xorm/xorm/interface.go +++ b/vendor/github.com/go-xorm/xorm/interface.go @@ -5,11 +5,12 @@ package xorm import ( + "context" "database/sql" "reflect" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) // Interface defines the 
interface which Engine, EngineGroup and Session will implementate. @@ -27,7 +28,7 @@ type Interface interface { Delete(interface{}) (int64, error) Distinct(columns ...string) *Session DropIndexes(bean interface{}) error - Exec(sqlOrAgrs ...interface{}) (sql.Result, error) + Exec(sqlOrArgs ...interface{}) (sql.Result, error) Exist(bean ...interface{}) (bool, error) Find(interface{}, ...interface{}) error FindAndCount(interface{}, ...interface{}) (int64, error) @@ -49,9 +50,9 @@ type Interface interface { Omit(columns ...string) *Session OrderBy(order string) *Session Ping() error - Query(sqlOrAgrs ...interface{}) (resultsSlice []map[string][]byte, err error) - QueryInterface(sqlorArgs ...interface{}) ([]map[string]interface{}, error) - QueryString(sqlorArgs ...interface{}) ([]map[string]string, error) + Query(sqlOrArgs ...interface{}) (resultsSlice []map[string][]byte, err error) + QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) + QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) Rows(bean interface{}) (*Rows, error) SetExpr(string, string) *Session SQL(interface{}, ...interface{}) *Session @@ -73,6 +74,7 @@ type EngineInterface interface { Before(func(interface{})) *Session Charset(charset string) *Session ClearCache(...interface{}) error + Context(context.Context) *Session CreateTables(...interface{}) error DBMetas() ([]*core.Table, error) Dialect() core.Dialect diff --git a/vendor/github.com/go-xorm/xorm/json.go b/vendor/github.com/go-xorm/xorm/json.go new file mode 100644 index 0000000000000..fdb6ce56540b3 --- /dev/null +++ b/vendor/github.com/go-xorm/xorm/json.go @@ -0,0 +1,31 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xorm + +import "encoding/json" + +// JSONInterface represents an interface to handle json data +type JSONInterface interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var ( + // DefaultJSONHandler default json handler + DefaultJSONHandler JSONInterface = StdJSON{} +) + +// StdJSON implements JSONInterface via encoding/json +type StdJSON struct{} + +// Marshal implements JSONInterface +func (StdJSON) Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// Unmarshal implements JSONInterface +func (StdJSON) Unmarshal(data []byte, v interface{}) error { + return json.Unmarshal(data, v) +} diff --git a/vendor/github.com/go-xorm/xorm/logger.go b/vendor/github.com/go-xorm/xorm/logger.go index 727d030a4cf8c..7b26e77f3bc2d 100644 --- a/vendor/github.com/go-xorm/xorm/logger.go +++ b/vendor/github.com/go-xorm/xorm/logger.go @@ -9,7 +9,7 @@ import ( "io" "log" - "github.com/go-xorm/core" + "xorm.io/core" ) // default log options diff --git a/vendor/github.com/go-xorm/xorm/rows.go b/vendor/github.com/go-xorm/xorm/rows.go index 54ec7f37a28af..bdd44589f8de9 100644 --- a/vendor/github.com/go-xorm/xorm/rows.go +++ b/vendor/github.com/go-xorm/xorm/rows.go @@ -9,16 +9,13 @@ import ( "fmt" "reflect" - "github.com/go-xorm/core" + "xorm.io/core" ) // Rows rows wrapper a rows to type Rows struct { - NoTypeCheck bool - session *Session rows *core.Rows - fields []string beanType reflect.Type lastError error } @@ -57,13 +54,6 @@ func newRows(session *Session, bean interface{}) (*Rows, error) { return nil, err } - rows.fields, err = rows.rows.Columns() - if err != nil { - rows.lastError = err - rows.Close() - return nil, err - } - return rows, nil } @@ -90,7 +80,7 @@ func (rows *Rows) Scan(bean interface{}) error { return rows.lastError } - if !rows.NoTypeCheck && reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType { + if reflect.Indirect(reflect.ValueOf(bean)).Type() != rows.beanType { return fmt.Errorf("scan arg is incompatible type to [%v]", rows.beanType) } @@ -98,13 +88,18 @@ func (rows *Rows) Scan(bean interface{}) error { return err } - scanResults, err := rows.session.row2Slice(rows.rows, rows.fields, bean) + fields, err := rows.rows.Columns() + if err != nil { + return err + } + + scanResults, err := rows.session.row2Slice(rows.rows, fields, bean) if err != nil { return err } dataStruct := rValue(bean) - _, err = rows.session.slice2Bean(scanResults, rows.fields, bean, &dataStruct, rows.session.statement.RefTable) + _, err = rows.session.slice2Bean(scanResults, fields, bean, &dataStruct, rows.session.statement.RefTable) if err != nil { return err } @@ -118,17 +113,9 @@ func (rows *Rows) Close() error { defer rows.session.Close() } - if rows.lastError == nil { - if rows.rows != nil { - rows.lastError = rows.rows.Close() - if rows.lastError != nil { - return rows.lastError - } - } - } else { - if rows.rows != nil { - defer rows.rows.Close() - } + if rows.rows != nil { + return rows.rows.Close() } + return rows.lastError } diff --git a/vendor/github.com/go-xorm/xorm/session.go b/vendor/github.com/go-xorm/xorm/session.go index e3437b9181439..b33955fdce732 100644 --- a/vendor/github.com/go-xorm/xorm/session.go +++ b/vendor/github.com/go-xorm/xorm/session.go @@ -5,8 +5,8 @@ package xorm import ( + "context" "database/sql" - "encoding/json" "errors" "fmt" "hash/crc32" @@ -14,7 +14,14 @@ import ( "strings" "time" - "github.com/go-xorm/core" + 
"xorm.io/core" +) + +type sessionType int + +const ( + engineSession sessionType = iota + groupSession ) // Session keep a pointer to sql.DB and provides all execution of all @@ -51,7 +58,8 @@ type Session struct { lastSQL string lastSQLArgs []interface{} - err error + ctx context.Context + sessionType sessionType } // Clone copy all the session's content and return a new session @@ -82,6 +90,8 @@ func (session *Session) Init() { session.lastSQL = "" session.lastSQLArgs = []interface{}{} + + session.ctx = session.engine.defaultContext } // Close release the connection from pool @@ -275,7 +285,7 @@ func (session *Session) doPrepare(db *core.DB, sqlStr string) (stmt *core.Stmt, var has bool stmt, has = session.stmtCache[crc] if !has { - stmt, err = db.Prepare(sqlStr) + stmt, err = db.PrepareContext(session.ctx, sqlStr) if err != nil { return nil, err } @@ -480,13 +490,13 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b continue } if fieldValue.CanAddr() { - err := json.Unmarshal(bs, fieldValue.Addr().Interface()) + err := DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) if err != nil { return nil, err } } else { x := reflect.New(fieldType) - err := json.Unmarshal(bs, x.Interface()) + err := DefaultJSONHandler.Unmarshal(bs, x.Interface()) if err != nil { return nil, err } @@ -510,13 +520,13 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true if len(bs) > 0 { if fieldValue.CanAddr() { - err := json.Unmarshal(bs, fieldValue.Addr().Interface()) + err := DefaultJSONHandler.Unmarshal(bs, fieldValue.Addr().Interface()) if err != nil { return nil, err } } else { x := reflect.New(fieldType) - err := json.Unmarshal(bs, x.Interface()) + err := DefaultJSONHandler.Unmarshal(bs, x.Interface()) if err != nil { return nil, err } @@ -532,7 +542,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true if col.SQLType.IsText() { x := reflect.New(fieldType) - err := json.Unmarshal(vv.Bytes(), x.Interface()) + err := DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) if err != nil { return nil, err } @@ -647,7 +657,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true x := reflect.New(fieldType) if len([]byte(vv.String())) > 0 { - err := json.Unmarshal([]byte(vv.String()), x.Interface()) + err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), x.Interface()) if err != nil { return nil, err } @@ -657,7 +667,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b hasAssigned = true x := reflect.New(fieldType) if len(vv.Bytes()) > 0 { - err := json.Unmarshal(vv.Bytes(), x.Interface()) + err := DefaultJSONHandler.Unmarshal(vv.Bytes(), x.Interface()) if err != nil { return nil, err } @@ -793,7 +803,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b case core.Complex64Type: var x complex64 if len([]byte(vv.String())) > 0 { - err := json.Unmarshal([]byte(vv.String()), &x) + err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) if err != nil { return nil, err } @@ -803,7 +813,7 @@ func (session *Session) slice2Bean(scanResults []interface{}, fields []string, b case core.Complex128Type: var x complex128 if len([]byte(vv.String())) > 0 { - err := json.Unmarshal([]byte(vv.String()), &x) + err := DefaultJSONHandler.Unmarshal([]byte(vv.String()), &x) if err != nil { return nil, err } diff --git a/vendor/github.com/go-xorm/xorm/session_cols.go 
b/vendor/github.com/go-xorm/xorm/session_cols.go index 47d109c6cbbe9..dc3befcf6b989 100644 --- a/vendor/github.com/go-xorm/xorm/session_cols.go +++ b/vendor/github.com/go-xorm/xorm/session_cols.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) type incrParam struct { diff --git a/vendor/github.com/go-xorm/xorm/session_cond.go b/vendor/github.com/go-xorm/xorm/session_cond.go index e1d528f2dbd1b..b16bdea8e0359 100644 --- a/vendor/github.com/go-xorm/xorm/session_cond.go +++ b/vendor/github.com/go-xorm/xorm/session_cond.go @@ -4,7 +4,7 @@ package xorm -import "github.com/go-xorm/builder" +import "xorm.io/builder" // Sql provides raw sql input parameter. When you have a complex SQL statement // and cannot use Where, Id, In and etc. Methods to describe, you can use SQL. diff --git a/vendor/github.com/go-xorm/xorm/context.go b/vendor/github.com/go-xorm/xorm/session_context.go similarity index 60% rename from vendor/github.com/go-xorm/xorm/context.go rename to vendor/github.com/go-xorm/xorm/session_context.go index 074ba35a80a54..915f056858537 100644 --- a/vendor/github.com/go-xorm/xorm/context.go +++ b/vendor/github.com/go-xorm/xorm/session_context.go @@ -1,18 +1,15 @@ -// Copyright 2017 The Xorm Authors. All rights reserved. +// Copyright 2019 The Xorm Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.8 - package xorm import "context" -// PingContext tests if database is alive -func (engine *Engine) PingContext(ctx context.Context) error { - session := engine.NewSession() - defer session.Close() - return session.PingContext(ctx) +// Context sets the context on this session +func (session *Session) Context(ctx context.Context) *Session { + session.ctx = ctx + return session } // PingContext test if database is ok diff --git a/vendor/github.com/go-xorm/xorm/session_convert.go b/vendor/github.com/go-xorm/xorm/session_convert.go index 1f9d8aa1bd001..c13b003d6b2fd 100644 --- a/vendor/github.com/go-xorm/xorm/session_convert.go +++ b/vendor/github.com/go-xorm/xorm/session_convert.go @@ -7,7 +7,6 @@ package xorm import ( "database/sql" "database/sql/driver" - "encoding/json" "errors" "fmt" "reflect" @@ -15,7 +14,7 @@ import ( "strings" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) func (session *Session) str2Time(col *core.Column, data string) (outTime time.Time, outErr error) { @@ -103,7 +102,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, case reflect.Complex64, reflect.Complex128: x := reflect.New(fieldType) if len(data) > 0 { - err := json.Unmarshal(data, x.Interface()) + err := DefaultJSONHandler.Unmarshal(data, x.Interface()) if err != nil { session.engine.logger.Error(err) return err @@ -117,7 +116,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, if col.SQLType.IsText() { x := reflect.New(fieldType) if len(data) > 0 { - err := json.Unmarshal(data, x.Interface()) + err := DefaultJSONHandler.Unmarshal(data, x.Interface()) if err != nil { session.engine.logger.Error(err) return err @@ -130,7 +129,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, } else { x := reflect.New(fieldType) if len(data) > 0 { - err := json.Unmarshal(data, x.Interface()) + err := DefaultJSONHandler.Unmarshal(data, x.Interface()) if err != nil { 
session.engine.logger.Error(err) return err @@ -259,7 +258,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, case core.Complex64Type.Kind(): var x complex64 if len(data) > 0 { - err := json.Unmarshal(data, &x) + err := DefaultJSONHandler.Unmarshal(data, &x) if err != nil { session.engine.logger.Error(err) return err @@ -270,7 +269,7 @@ func (session *Session) bytes2Value(col *core.Column, fieldValue *reflect.Value, case core.Complex128Type.Kind(): var x complex128 if len(data) > 0 { - err := json.Unmarshal(data, &x) + err := DefaultJSONHandler.Unmarshal(data, &x) if err != nil { session.engine.logger.Error(err) return err @@ -604,14 +603,14 @@ func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Val } if col.SQLType.IsText() { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { session.engine.logger.Error(err) return 0, err } return string(bytes), nil } else if col.SQLType.IsBlob() { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { session.engine.logger.Error(err) return 0, err @@ -620,7 +619,7 @@ func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Val } return nil, fmt.Errorf("Unsupported type %v", fieldValue.Type()) case reflect.Complex64, reflect.Complex128: - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { session.engine.logger.Error(err) return 0, err @@ -632,7 +631,7 @@ func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Val } if col.SQLType.IsText() { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { session.engine.logger.Error(err) return 0, err @@ -645,7 +644,7 @@ func (session *Session) value2Interface(col *core.Column, fieldValue reflect.Val (fieldValue.Type().Elem().Kind() == reflect.Uint8) { bytes = fieldValue.Bytes() } else { - bytes, err = json.Marshal(fieldValue.Interface()) + bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { session.engine.logger.Error(err) return 0, err diff --git a/vendor/github.com/go-xorm/xorm/session_delete.go b/vendor/github.com/go-xorm/xorm/session_delete.go index dcce543a3a1e3..675d4d8c7d56e 100644 --- a/vendor/github.com/go-xorm/xorm/session_delete.go +++ b/vendor/github.com/go-xorm/xorm/session_delete.go @@ -9,7 +9,7 @@ import ( "fmt" "strconv" - "github.com/go-xorm/core" + "xorm.io/core" ) func (session *Session) cacheDelete(table *core.Table, tableName, sqlStr string, args ...interface{}) error { @@ -79,6 +79,10 @@ func (session *Session) Delete(bean interface{}) (int64, error) { defer session.Close() } + if session.statement.lastError != nil { + return 0, session.statement.lastError + } + if err := session.statement.setRefBean(bean); err != nil { return 0, err } diff --git a/vendor/github.com/go-xorm/xorm/session_exist.go b/vendor/github.com/go-xorm/xorm/session_exist.go index 74a660e852bd3..660cc47e42580 100644 --- a/vendor/github.com/go-xorm/xorm/session_exist.go +++ b/vendor/github.com/go-xorm/xorm/session_exist.go @@ -9,8 +9,8 @@ import ( "fmt" "reflect" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) // Exist returns true if the record exist otherwise return 
false @@ -19,6 +19,10 @@ func (session *Session) Exist(bean ...interface{}) (bool, error) { defer session.Close() } + if session.statement.lastError != nil { + return false, session.statement.lastError + } + var sqlStr string var args []interface{} var err error @@ -30,6 +34,8 @@ func (session *Session) Exist(bean ...interface{}) (bool, error) { return false, ErrTableNotFound } + tableName = session.statement.Engine.Quote(tableName) + if session.statement.cond.IsValid() { condSQL, condArgs, err := builder.ToSQL(session.statement.cond) if err != nil { @@ -37,14 +43,18 @@ func (session *Session) Exist(bean ...interface{}) (bool, error) { } if session.engine.dialect.DBType() == core.MSSQL { - sqlStr = fmt.Sprintf("SELECT top 1 * FROM %s WHERE %s", tableName, condSQL) + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s WHERE %s", tableName, condSQL) + } else if session.engine.dialect.DBType() == core.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE (%s) AND ROWNUM=1", tableName, condSQL) } else { sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE %s LIMIT 1", tableName, condSQL) } args = condArgs } else { if session.engine.dialect.DBType() == core.MSSQL { - sqlStr = fmt.Sprintf("SELECT top 1 * FROM %s", tableName) + sqlStr = fmt.Sprintf("SELECT TOP 1 * FROM %s", tableName) + } else if session.engine.dialect.DBType() == core.ORACLE { + sqlStr = fmt.Sprintf("SELECT * FROM %s WHERE ROWNUM=1", tableName) } else { sqlStr = fmt.Sprintf("SELECT * FROM %s LIMIT 1", tableName) } diff --git a/vendor/github.com/go-xorm/xorm/session_find.go b/vendor/github.com/go-xorm/xorm/session_find.go index a5b4f7934208d..d3fc0d30ddec3 100644 --- a/vendor/github.com/go-xorm/xorm/session_find.go +++ b/vendor/github.com/go-xorm/xorm/session_find.go @@ -10,8 +10,8 @@ import ( "reflect" "strings" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) const ( @@ -63,6 +63,10 @@ func (session *Session) FindAndCount(rowsSlicePtr interface{}, condiBean ...inte } func (session *Session) find(rowsSlicePtr interface{}, condiBean ...interface{}) error { + if session.statement.lastError != nil { + return session.statement.lastError + } + sliceValue := reflect.Indirect(reflect.ValueOf(rowsSlicePtr)) if sliceValue.Kind() != reflect.Slice && sliceValue.Kind() != reflect.Map { return errors.New("needs a pointer to a slice or a map") diff --git a/vendor/github.com/go-xorm/xorm/session_get.go b/vendor/github.com/go-xorm/xorm/session_get.go index 1cea31c5f89be..a38707c8c6907 100644 --- a/vendor/github.com/go-xorm/xorm/session_get.go +++ b/vendor/github.com/go-xorm/xorm/session_get.go @@ -11,7 +11,7 @@ import ( "reflect" "strconv" - "github.com/go-xorm/core" + "xorm.io/core" ) // Get retrieve one record from database, bean's non-empty fields @@ -24,6 +24,10 @@ func (session *Session) Get(bean interface{}) (bool, error) { } func (session *Session) get(bean interface{}) (bool, error) { + if session.statement.lastError != nil { + return false, session.statement.lastError + } + beanValue := reflect.ValueOf(bean) if beanValue.Kind() != reflect.Ptr { return false, errors.New("needs a pointer to a value") diff --git a/vendor/github.com/go-xorm/xorm/session_insert.go b/vendor/github.com/go-xorm/xorm/session_insert.go index e673e87425278..3cff48f6133f0 100644 --- a/vendor/github.com/go-xorm/xorm/session_insert.go +++ b/vendor/github.com/go-xorm/xorm/session_insert.go @@ -8,10 +8,11 @@ import ( "errors" "fmt" "reflect" + "sort" 
"strconv" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) // Insert insert one or more beans @@ -24,32 +25,67 @@ func (session *Session) Insert(beans ...interface{}) (int64, error) { } for _, bean := range beans { - sliceValue := reflect.Indirect(reflect.ValueOf(bean)) - if sliceValue.Kind() == reflect.Slice { - size := sliceValue.Len() - if size > 0 { - if session.engine.SupportInsertMany() { - cnt, err := session.innerInsertMulti(bean) - if err != nil { - return affected, err - } - affected += cnt - } else { - for i := 0; i < size; i++ { - cnt, err := session.innerInsert(sliceValue.Index(i).Interface()) + switch bean.(type) { + case map[string]interface{}: + cnt, err := session.insertMapInterface(bean.(map[string]interface{})) + if err != nil { + return affected, err + } + affected += cnt + case []map[string]interface{}: + s := bean.([]map[string]interface{}) + session.autoResetStatement = false + for i := 0; i < len(s); i++ { + cnt, err := session.insertMapInterface(s[i]) + if err != nil { + return affected, err + } + affected += cnt + } + case map[string]string: + cnt, err := session.insertMapString(bean.(map[string]string)) + if err != nil { + return affected, err + } + affected += cnt + case []map[string]string: + s := bean.([]map[string]string) + session.autoResetStatement = false + for i := 0; i < len(s); i++ { + cnt, err := session.insertMapString(s[i]) + if err != nil { + return affected, err + } + affected += cnt + } + default: + sliceValue := reflect.Indirect(reflect.ValueOf(bean)) + if sliceValue.Kind() == reflect.Slice { + size := sliceValue.Len() + if size > 0 { + if session.engine.SupportInsertMany() { + cnt, err := session.innerInsertMulti(bean) if err != nil { return affected, err } affected += cnt + } else { + for i := 0; i < size; i++ { + cnt, err := session.innerInsert(sliceValue.Index(i).Interface()) + if err != nil { + return affected, err + } + affected += cnt + } } } + } else { + cnt, err := session.innerInsert(bean) + if err != nil { + return affected, err + } + affected += cnt } - } else { - cnt, err := session.innerInsert(bean) - if err != nil { - return affected, err - } - affected += cnt } } @@ -337,21 +373,30 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { var sqlStr string var tableName = session.statement.TableName() + var output string + if session.engine.dialect.DBType() == core.MSSQL && len(table.AutoIncrement) > 0 { + output = fmt.Sprintf(" OUTPUT Inserted.%s", table.AutoIncrement) + } if len(colPlaces) > 0 { - sqlStr = fmt.Sprintf("INSERT INTO %s (%v%v%v) VALUES (%v)", + sqlStr = fmt.Sprintf("INSERT INTO %s (%v%v%v)%s VALUES (%v)", session.engine.Quote(tableName), session.engine.QuoteStr(), strings.Join(colNames, session.engine.Quote(", ")), session.engine.QuoteStr(), + output, colPlaces) } else { if session.engine.dialect.DBType() == core.MYSQL { sqlStr = fmt.Sprintf("INSERT INTO %s VALUES ()", session.engine.Quote(tableName)) } else { - sqlStr = fmt.Sprintf("INSERT INTO %s DEFAULT VALUES", session.engine.Quote(tableName)) + sqlStr = fmt.Sprintf("INSERT INTO %s%s DEFAULT VALUES", session.engine.Quote(tableName), output) } } + if len(table.AutoIncrement) > 0 && session.engine.dialect.DBType() == core.POSTGRES { + sqlStr = sqlStr + " RETURNING " + session.engine.Quote(table.AutoIncrement) + } + handleAfterInsertProcessorFunc := func(bean interface{}) { if session.isAutoCommit { for _, closure := range session.afterClosures { @@ -423,9 +468,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { 
aiValue.Set(int64ToIntValue(id, aiValue.Type())) return 1, nil - } else if session.engine.dialect.DBType() == core.POSTGRES && len(table.AutoIncrement) > 0 { - //assert table.AutoIncrement != "" - sqlStr = sqlStr + " RETURNING " + session.engine.Quote(table.AutoIncrement) + } else if len(table.AutoIncrement) > 0 && (session.engine.dialect.DBType() == core.POSTGRES || session.engine.dialect.DBType() == core.MSSQL) { res, err := session.queryBytes(sqlStr, args...) if err != nil { @@ -445,7 +488,7 @@ func (session *Session) innerInsert(bean interface{}) (int64, error) { } if len(res) < 1 { - return 0, errors.New("insert no error but not returned id") + return 0, errors.New("insert successfully but not returned id") } idByte := res[0][table.AutoIncrement] @@ -622,3 +665,83 @@ func (session *Session) genInsertColumns(bean interface{}) ([]string, []interfac } return colNames, args, nil } + +func (session *Session) insertMapInterface(m map[string]interface{}) (int64, error) { + if len(m) == 0 { + return 0, ErrParamsType + } + + var columns = make([]string, 0, len(m)) + for k := range m { + columns = append(columns, k) + } + sort.Strings(columns) + + qm := strings.Repeat("?,", len(columns)) + qm = "(" + qm[:len(qm)-1] + ")" + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var sql = fmt.Sprintf("INSERT INTO %s (`%s`) VALUES %s", session.engine.Quote(tableName), strings.Join(columns, "`,`"), qm) + var args = make([]interface{}, 0, len(m)) + for _, colName := range columns { + args = append(args, m[colName]) + } + + if err := session.cacheInsert(tableName); err != nil { + return 0, err + } + + res, err := session.exec(sql, args...) + if err != nil { + return 0, err + } + affected, err := res.RowsAffected() + if err != nil { + return 0, err + } + return affected, nil +} + +func (session *Session) insertMapString(m map[string]string) (int64, error) { + if len(m) == 0 { + return 0, ErrParamsType + } + + var columns = make([]string, 0, len(m)) + for k := range m { + columns = append(columns, k) + } + sort.Strings(columns) + + qm := strings.Repeat("?,", len(columns)) + qm = "(" + qm[:len(qm)-1] + ")" + + tableName := session.statement.TableName() + if len(tableName) <= 0 { + return 0, ErrTableNotFound + } + + var sql = fmt.Sprintf("INSERT INTO %s (`%s`) VALUES %s", session.engine.Quote(tableName), strings.Join(columns, "`,`"), qm) + var args = make([]interface{}, 0, len(m)) + for _, colName := range columns { + args = append(args, m[colName]) + } + + if err := session.cacheInsert(tableName); err != nil { + return 0, err + } + + res, err := session.exec(sql, args...) 
+ if err != nil { + return 0, err + } + affected, err := res.RowsAffected() + if err != nil { + return 0, err + } + return affected, nil +} diff --git a/vendor/github.com/go-xorm/xorm/session_iterate.go b/vendor/github.com/go-xorm/xorm/session_iterate.go index 071fce49921de..ca996c2884d04 100644 --- a/vendor/github.com/go-xorm/xorm/session_iterate.go +++ b/vendor/github.com/go-xorm/xorm/session_iterate.go @@ -23,6 +23,10 @@ func (session *Session) Iterate(bean interface{}, fun IterFunc) error { defer session.Close() } + if session.statement.lastError != nil { + return session.statement.lastError + } + if session.statement.bufferSize > 0 { return session.bufferIterate(bean, fun) } diff --git a/vendor/github.com/go-xorm/xorm/session_query.go b/vendor/github.com/go-xorm/xorm/session_query.go index 6d597cc4592f8..21c00b8d7f357 100644 --- a/vendor/github.com/go-xorm/xorm/session_query.go +++ b/vendor/github.com/go-xorm/xorm/session_query.go @@ -11,13 +11,13 @@ import ( "strings" "time" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) -func (session *Session) genQuerySQL(sqlorArgs ...interface{}) (string, []interface{}, error) { - if len(sqlorArgs) > 0 { - return convertSQLOrArgs(sqlorArgs...) +func (session *Session) genQuerySQL(sqlOrArgs ...interface{}) (string, []interface{}, error) { + if len(sqlOrArgs) > 0 { + return convertSQLOrArgs(sqlOrArgs...) } if session.statement.RawSQL != "" { @@ -78,12 +78,12 @@ func (session *Session) genQuerySQL(sqlorArgs ...interface{}) (string, []interfa } // Query runs a raw sql and return records as []map[string][]byte -func (session *Session) Query(sqlorArgs ...interface{}) ([]map[string][]byte, error) { +func (session *Session) Query(sqlOrArgs ...interface{}) ([]map[string][]byte, error) { if session.isAutoClose { defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlorArgs...) + sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) if err != nil { return nil, err } @@ -227,12 +227,12 @@ func rows2SliceString(rows *core.Rows) (resultsSlice [][]string, err error) { } // QueryString runs a raw sql and return records as []map[string]string -func (session *Session) QueryString(sqlorArgs ...interface{}) ([]map[string]string, error) { +func (session *Session) QueryString(sqlOrArgs ...interface{}) ([]map[string]string, error) { if session.isAutoClose { defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlorArgs...) + sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) if err != nil { return nil, err } @@ -247,12 +247,12 @@ func (session *Session) QueryString(sqlorArgs ...interface{}) ([]map[string]stri } // QuerySliceString runs a raw sql and return records as [][]string -func (session *Session) QuerySliceString(sqlorArgs ...interface{}) ([][]string, error) { +func (session *Session) QuerySliceString(sqlOrArgs ...interface{}) ([][]string, error) { if session.isAutoClose { defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlorArgs...) + sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) 
if err != nil { return nil, err } @@ -300,12 +300,12 @@ func rows2Interfaces(rows *core.Rows) (resultsSlice []map[string]interface{}, er } // QueryInterface runs a raw sql and return records as []map[string]interface{} -func (session *Session) QueryInterface(sqlorArgs ...interface{}) ([]map[string]interface{}, error) { +func (session *Session) QueryInterface(sqlOrArgs ...interface{}) ([]map[string]interface{}, error) { if session.isAutoClose { defer session.Close() } - sqlStr, args, err := session.genQuerySQL(sqlorArgs...) + sqlStr, args, err := session.genQuerySQL(sqlOrArgs...) if err != nil { return nil, err } diff --git a/vendor/github.com/go-xorm/xorm/session_raw.go b/vendor/github.com/go-xorm/xorm/session_raw.go index 47823d6706322..67648ef130ee2 100644 --- a/vendor/github.com/go-xorm/xorm/session_raw.go +++ b/vendor/github.com/go-xorm/xorm/session_raw.go @@ -9,8 +9,8 @@ import ( "reflect" "time" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) func (session *Session) queryPreprocess(sqlStr *string, paramStr ...interface{}) { @@ -49,7 +49,7 @@ func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Row if session.isAutoCommit { var db *core.DB - if session.engine.engineGroup != nil { + if session.sessionType == groupSession { db = session.engine.engineGroup.Slave().DB() } else { db = session.DB() @@ -62,21 +62,21 @@ func (session *Session) queryRows(sqlStr string, args ...interface{}) (*core.Row return nil, err } - rows, err := stmt.Query(args...) + rows, err := stmt.QueryContext(session.ctx, args...) if err != nil { return nil, err } return rows, nil } - rows, err := db.Query(sqlStr, args...) + rows, err := db.QueryContext(session.ctx, sqlStr, args...) if err != nil { return nil, err } return rows, nil } - rows, err := session.tx.Query(sqlStr, args...) + rows, err := session.tx.QueryContext(session.ctx, sqlStr, args...) if err != nil { return nil, err } @@ -175,7 +175,7 @@ func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, er } if !session.isAutoCommit { - return session.tx.Exec(sqlStr, args...) + return session.tx.ExecContext(session.ctx, sqlStr, args...) } if session.prepareStmt { @@ -184,24 +184,24 @@ func (session *Session) exec(sqlStr string, args ...interface{}) (sql.Result, er return nil, err } - res, err := stmt.Exec(args...) + res, err := stmt.ExecContext(session.ctx, args...) if err != nil { return nil, err } return res, nil } - return session.DB().Exec(sqlStr, args...) + return session.DB().ExecContext(session.ctx, sqlStr, args...) 
} -func convertSQLOrArgs(sqlorArgs ...interface{}) (string, []interface{}, error) { - switch sqlorArgs[0].(type) { +func convertSQLOrArgs(sqlOrArgs ...interface{}) (string, []interface{}, error) { + switch sqlOrArgs[0].(type) { case string: - return sqlorArgs[0].(string), sqlorArgs[1:], nil + return sqlOrArgs[0].(string), sqlOrArgs[1:], nil case *builder.Builder: - return sqlorArgs[0].(*builder.Builder).ToSQL() + return sqlOrArgs[0].(*builder.Builder).ToSQL() case builder.Builder: - bd := sqlorArgs[0].(builder.Builder) + bd := sqlOrArgs[0].(builder.Builder) return bd.ToSQL() } @@ -209,16 +209,16 @@ func convertSQLOrArgs(sqlorArgs ...interface{}) (string, []interface{}, error) { } // Exec raw sql -func (session *Session) Exec(sqlorArgs ...interface{}) (sql.Result, error) { +func (session *Session) Exec(sqlOrArgs ...interface{}) (sql.Result, error) { if session.isAutoClose { defer session.Close() } - if len(sqlorArgs) == 0 { + if len(sqlOrArgs) == 0 { return nil, ErrUnSupportedType } - sqlStr, args, err := convertSQLOrArgs(sqlorArgs...) + sqlStr, args, err := convertSQLOrArgs(sqlOrArgs...) if err != nil { return nil, err } diff --git a/vendor/github.com/go-xorm/xorm/session_schema.go b/vendor/github.com/go-xorm/xorm/session_schema.go index 369ec72a4d8a3..da5c88559914a 100644 --- a/vendor/github.com/go-xorm/xorm/session_schema.go +++ b/vendor/github.com/go-xorm/xorm/session_schema.go @@ -9,7 +9,7 @@ import ( "fmt" "strings" - "github.com/go-xorm/core" + "xorm.io/core" ) // Ping test if database is ok @@ -19,7 +19,7 @@ func (session *Session) Ping() error { } session.engine.logger.Infof("PING DATABASE %v", session.engine.DriverName()) - return session.DB().Ping() + return session.DB().PingContext(session.ctx) } // CreateTable create a table according a bean diff --git a/vendor/github.com/go-xorm/xorm/session_tx.go b/vendor/github.com/go-xorm/xorm/session_tx.go index c8d759a31acef..ee3d473f953d0 100644 --- a/vendor/github.com/go-xorm/xorm/session_tx.go +++ b/vendor/github.com/go-xorm/xorm/session_tx.go @@ -7,7 +7,7 @@ package xorm // Begin a transaction func (session *Session) Begin() error { if session.isAutoCommit { - tx, err := session.DB().Begin() + tx, err := session.DB().BeginTx(session.ctx, nil) if err != nil { return err } diff --git a/vendor/github.com/go-xorm/xorm/session_update.go b/vendor/github.com/go-xorm/xorm/session_update.go index 37b34ff3dd0ba..216c4e87dd6d2 100644 --- a/vendor/github.com/go-xorm/xorm/session_update.go +++ b/vendor/github.com/go-xorm/xorm/session_update.go @@ -11,8 +11,8 @@ import ( "strconv" "strings" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) func (session *Session) cacheUpdate(table *core.Table, tableName, sqlStr string, args ...interface{}) error { @@ -147,6 +147,10 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 defer session.Close() } + if session.statement.lastError != nil { + return 0, session.statement.lastError + } + v := rValue(bean) t := v.Type() @@ -240,23 +244,39 @@ func (session *Session) Update(bean interface{}, condiBean ...interface{}) (int6 } var autoCond builder.Cond - if !session.statement.noAutoCondition && len(condiBean) > 0 { - if c, ok := condiBean[0].(map[string]interface{}); ok { - autoCond = builder.Eq(c) - } else { - ct := reflect.TypeOf(condiBean[0]) - k := ct.Kind() - if k == reflect.Ptr { - k = ct.Elem().Kind() + if !session.statement.noAutoCondition { + 
condBeanIsStruct := false + if len(condiBean) > 0 { + if c, ok := condiBean[0].(map[string]interface{}); ok { + autoCond = builder.Eq(c) + } else { + ct := reflect.TypeOf(condiBean[0]) + k := ct.Kind() + if k == reflect.Ptr { + k = ct.Elem().Kind() + } + if k == reflect.Struct { + var err error + autoCond, err = session.statement.buildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false) + if err != nil { + return 0, err + } + condBeanIsStruct = true + } else { + return 0, ErrConditionType + } } - if k == reflect.Struct { - var err error - autoCond, err = session.statement.buildConds(session.statement.RefTable, condiBean[0], true, true, false, true, false) - if err != nil { - return 0, err + } + + if !condBeanIsStruct && table != nil { + if col := table.DeletedColumn(); col != nil && !session.statement.unscoped { // tag "deleted" is enabled + autoCond1 := session.engine.CondDeleted(session.engine.Quote(col.Name)) + + if autoCond == nil { + autoCond = autoCond1 + } else { + autoCond = autoCond.And(autoCond1) } - } else { - return 0, ErrConditionType } } } diff --git a/vendor/github.com/go-xorm/xorm/statement.go b/vendor/github.com/go-xorm/xorm/statement.go index a7f7010ad2b27..88b8423517873 100644 --- a/vendor/github.com/go-xorm/xorm/statement.go +++ b/vendor/github.com/go-xorm/xorm/statement.go @@ -6,15 +6,14 @@ package xorm import ( "database/sql/driver" - "encoding/json" "errors" "fmt" "reflect" "strings" "time" - "github.com/go-xorm/builder" - "github.com/go-xorm/core" + "xorm.io/builder" + "xorm.io/core" ) // Statement save all the sql info for executing SQL @@ -60,6 +59,7 @@ type Statement struct { cond builder.Cond bufferSize int context ContextCache + lastError error } // Init reset all the statement's fields @@ -101,6 +101,7 @@ func (statement *Statement) Init() { statement.cond = builder.NewCond() statement.bufferSize = 0 statement.context = nil + statement.lastError = nil } // NoAutoCondition if you do not want convert bean's field as query condition, then use this function @@ -125,13 +126,13 @@ func (statement *Statement) SQL(query interface{}, args ...interface{}) *Stateme var err error statement.RawSQL, statement.RawParams, err = query.(*builder.Builder).ToSQL() if err != nil { - statement.Engine.logger.Error(err) + statement.lastError = err } case string: statement.RawSQL = query.(string) statement.RawParams = args default: - statement.Engine.logger.Error("unsupported sql type") + statement.lastError = ErrUnSupportedSQLType } return statement @@ -160,7 +161,7 @@ func (statement *Statement) And(query interface{}, args ...interface{}) *Stateme } } default: - // TODO: not support condition type + statement.lastError = ErrConditionType } return statement @@ -406,7 +407,7 @@ func (statement *Statement) buildUpdates(bean interface{}, } else { // Blank struct could not be as update data if requiredField || !isStructZero(fieldValue) { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { panic(fmt.Sprintf("mashal %v failed", fieldValue.Interface())) } @@ -435,7 +436,7 @@ func (statement *Statement) buildUpdates(bean interface{}, } if col.SQLType.IsText() { - bytes, err := json.Marshal(fieldValue.Interface()) + bytes, err := DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { engine.logger.Error(err) continue @@ -455,7 +456,7 @@ func (statement *Statement) buildUpdates(bean interface{}, fieldType.Elem().Kind() == reflect.Uint8 { val = 
fieldValue.Slice(0, 0).Interface() } else { - bytes, err = json.Marshal(fieldValue.Interface()) + bytes, err = DefaultJSONHandler.Marshal(fieldValue.Interface()) if err != nil { engine.logger.Error(err) continue @@ -755,9 +756,32 @@ func (statement *Statement) Join(joinOP string, tablename interface{}, condition fmt.Fprintf(&buf, "%v JOIN ", joinOP) } - tbName := statement.Engine.TableName(tablename, true) + switch tp := tablename.(type) { + case builder.Builder: + subSQL, subQueryArgs, err := tp.ToSQL() + if err != nil { + statement.lastError = err + return statement + } + tbs := strings.Split(tp.TableName(), ".") + var aliasName = strings.Trim(tbs[len(tbs)-1], statement.Engine.QuoteStr()) + fmt.Fprintf(&buf, "(%s) %s ON %v", subSQL, aliasName, condition) + statement.joinArgs = append(statement.joinArgs, subQueryArgs...) + case *builder.Builder: + subSQL, subQueryArgs, err := tp.ToSQL() + if err != nil { + statement.lastError = err + return statement + } + tbs := strings.Split(tp.TableName(), ".") + var aliasName = strings.Trim(tbs[len(tbs)-1], statement.Engine.QuoteStr()) + fmt.Fprintf(&buf, "(%s) %s ON %v", subSQL, aliasName, condition) + statement.joinArgs = append(statement.joinArgs, subQueryArgs...) + default: + tbName := statement.Engine.TableName(tablename, true) + fmt.Fprintf(&buf, "%s ON %v", tbName, condition) + } - fmt.Fprintf(&buf, "%s ON %v", tbName, condition) statement.JoinStr = buf.String() statement.joinArgs = append(statement.joinArgs, args...) return statement @@ -1064,7 +1088,7 @@ func (statement *Statement) genSelectSQL(columnStr, condSQL string, needLimit, n if dialect.DBType() == core.MSSQL { if statement.LimitN > 0 { - top = fmt.Sprintf(" TOP %d ", statement.LimitN) + top = fmt.Sprintf("TOP %d ", statement.LimitN) } if statement.Start > 0 { var column string diff --git a/vendor/github.com/go-xorm/xorm/syslogger.go b/vendor/github.com/go-xorm/xorm/syslogger.go index 8840635d4c93e..11ba01e7bd3e0 100644 --- a/vendor/github.com/go-xorm/xorm/syslogger.go +++ b/vendor/github.com/go-xorm/xorm/syslogger.go @@ -10,7 +10,7 @@ import ( "fmt" "log/syslog" - "github.com/go-xorm/core" + "xorm.io/core" ) var _ core.ILogger = &SyslogLogger{} diff --git a/vendor/github.com/go-xorm/xorm/tag.go b/vendor/github.com/go-xorm/xorm/tag.go index e1c821fb540d5..eb87be78299f3 100644 --- a/vendor/github.com/go-xorm/xorm/tag.go +++ b/vendor/github.com/go-xorm/xorm/tag.go @@ -11,7 +11,7 @@ import ( "strings" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) type tagContext struct { diff --git a/vendor/github.com/go-xorm/xorm/test_mssql.sh b/vendor/github.com/go-xorm/xorm/test_mssql.sh index 6f9cf7295f7ee..7f060cff32cda 100644 --- a/vendor/github.com/go-xorm/xorm/test_mssql.sh +++ b/vendor/github.com/go-xorm/xorm/test_mssql.sh @@ -1 +1 @@ -go test -db=mssql -conn_str="server=192.168.1.58;user id=sa;password=123456;database=xorm_test" \ No newline at end of file +go test -db=mssql -conn_str="server=localhost;user id=sa;password=yourStrong(!)Password;database=xorm_test" \ No newline at end of file diff --git a/vendor/github.com/go-xorm/xorm/test_tidb.sh b/vendor/github.com/go-xorm/xorm/test_tidb.sh new file mode 100644 index 0000000000000..03d2d6cd82b1f --- /dev/null +++ b/vendor/github.com/go-xorm/xorm/test_tidb.sh @@ -0,0 +1 @@ +go test -db=mysql -conn_str="root:@tcp(localhost:4000)/xorm_test" -ignore_select_update=true \ No newline at end of file diff --git a/vendor/github.com/go-xorm/xorm/types.go 
b/vendor/github.com/go-xorm/xorm/types.go index 99d761c27892b..c76a54606512f 100644 --- a/vendor/github.com/go-xorm/xorm/types.go +++ b/vendor/github.com/go-xorm/xorm/types.go @@ -1,9 +1,13 @@ +// Copyright 2017 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package xorm import ( "reflect" - "github.com/go-xorm/core" + "xorm.io/core" ) var ( diff --git a/vendor/github.com/go-xorm/xorm/xorm.go b/vendor/github.com/go-xorm/xorm/xorm.go index 739de8d4292cf..26d00d264da93 100644 --- a/vendor/github.com/go-xorm/xorm/xorm.go +++ b/vendor/github.com/go-xorm/xorm/xorm.go @@ -7,6 +7,7 @@ package xorm import ( + "context" "fmt" "os" "reflect" @@ -14,7 +15,7 @@ import ( "sync" "time" - "github.com/go-xorm/core" + "xorm.io/core" ) const ( @@ -85,14 +86,15 @@ func NewEngine(driverName string, dataSourceName string) (*Engine, error) { } engine := &Engine{ - db: db, - dialect: dialect, - Tables: make(map[reflect.Type]*core.Table), - mutex: &sync.RWMutex{}, - TagIdentifier: "xorm", - TZLocation: time.Local, - tagHandlers: defaultTagHandlers, - cachers: make(map[string]core.Cacher), + db: db, + dialect: dialect, + Tables: make(map[reflect.Type]*core.Table), + mutex: &sync.RWMutex{}, + TagIdentifier: "xorm", + TZLocation: time.Local, + tagHandlers: defaultTagHandlers, + cachers: make(map[string]core.Cacher), + defaultContext: context.Background(), } if uri.DbType == core.SQLITE { diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go index 16f87c5d3787b..bbc1cb9c34551 100644 --- a/vendor/google.golang.org/appengine/internal/api.go +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. // +build !appengine -// +build go1.7 package internal @@ -130,7 +129,13 @@ func handleHTTP(w http.ResponseWriter, r *http.Request) { flushes++ } c.pendingLogs.Unlock() - go c.flushLog(false) + flushed := make(chan struct{}) + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) // Avoid nil Write call if c.Write is never called. @@ -140,6 +145,9 @@ func handleHTTP(w http.ResponseWriter, r *http.Request) { if c.outBody != nil { w.Write(c.outBody) } + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. + <-flushed } func executeRequestSafely(c *context, r *http.Request) { @@ -571,7 +579,10 @@ func logf(c *context, level int64, format string, args ...interface{}) { Level: &level, Message: &s, }) - log.Print(logLevelName[level] + ": " + s) + // Only duplicate log to stderr if not running on App Engine second generation + if !IsSecondGen() { + log.Print(logLevelName[level] + ": " + s) + } } // flushLog attempts to flush any pending logs to the appserver. diff --git a/vendor/google.golang.org/appengine/internal/api_pre17.go b/vendor/google.golang.org/appengine/internal/api_pre17.go deleted file mode 100644 index 028b4f056e0d3..0000000000000 --- a/vendor/google.golang.org/appengine/internal/api_pre17.go +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright 2011 Google Inc. All rights reserved. -// Use of this source code is governed by the Apache 2.0 -// license that can be found in the LICENSE file. 
- -// +build !appengine -// +build !go1.7 - -package internal - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "log" - "net" - "net/http" - "net/url" - "os" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" - - "github.com/golang/protobuf/proto" - netcontext "golang.org/x/net/context" - - basepb "google.golang.org/appengine/internal/base" - logpb "google.golang.org/appengine/internal/log" - remotepb "google.golang.org/appengine/internal/remote_api" -) - -const ( - apiPath = "/rpc_http" - defaultTicketSuffix = "/default.20150612t184001.0" -) - -var ( - // Incoming headers. - ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") - dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") - traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") - curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") - userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") - remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") - - // Outgoing headers. - apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") - apiEndpointHeaderValue = []string{"app-engine-apis"} - apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") - apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} - apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") - apiContentType = http.CanonicalHeaderKey("Content-Type") - apiContentTypeValue = []string{"application/octet-stream"} - logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") - - apiHTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: limitDial, - }, - } - - defaultTicketOnce sync.Once - defaultTicket string -) - -func apiURL() *url.URL { - host, port := "appengine.googleapis.internal", "10001" - if h := os.Getenv("API_HOST"); h != "" { - host = h - } - if p := os.Getenv("API_PORT"); p != "" { - port = p - } - return &url.URL{ - Scheme: "http", - Host: host + ":" + port, - Path: apiPath, - } -} - -func handleHTTP(w http.ResponseWriter, r *http.Request) { - c := &context{ - req: r, - outHeader: w.Header(), - apiURL: apiURL(), - } - stopFlushing := make(chan int) - - ctxs.Lock() - ctxs.m[r] = c - ctxs.Unlock() - defer func() { - ctxs.Lock() - delete(ctxs.m, r) - ctxs.Unlock() - }() - - // Patch up RemoteAddr so it looks reasonable. - if addr := r.Header.Get(userIPHeader); addr != "" { - r.RemoteAddr = addr - } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { - r.RemoteAddr = addr - } else { - // Should not normally reach here, but pick a sensible default anyway. - r.RemoteAddr = "127.0.0.1" - } - // The address in the headers will most likely be of these forms: - // 123.123.123.123 - // 2001:db8::1 - // net/http.Request.RemoteAddr is specified to be in "IP:port" form. - if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { - // Assume the remote address is only a host; add a default port. - r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") - } - - // Start goroutine responsible for flushing app logs. - // This is done after adding c to ctx.m (and stopped before removing it) - // because flushing logs requires making an API call. - go c.logFlusher(stopFlushing) - - executeRequestSafely(c, r) - c.outHeader = nil // make sure header changes aren't respected any more - - stopFlushing <- 1 // any logging beyond this point will be dropped - - // Flush any pending logs asynchronously. 
- c.pendingLogs.Lock() - flushes := c.pendingLogs.flushes - if len(c.pendingLogs.lines) > 0 { - flushes++ - } - c.pendingLogs.Unlock() - go c.flushLog(false) - w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) - - // Avoid nil Write call if c.Write is never called. - if c.outCode != 0 { - w.WriteHeader(c.outCode) - } - if c.outBody != nil { - w.Write(c.outBody) - } -} - -func executeRequestSafely(c *context, r *http.Request) { - defer func() { - if x := recover(); x != nil { - logf(c, 4, "%s", renderPanic(x)) // 4 == critical - c.outCode = 500 - } - }() - - http.DefaultServeMux.ServeHTTP(c, r) -} - -func renderPanic(x interface{}) string { - buf := make([]byte, 16<<10) // 16 KB should be plenty - buf = buf[:runtime.Stack(buf, false)] - - // Remove the first few stack frames: - // this func - // the recover closure in the caller - // That will root the stack trace at the site of the panic. - const ( - skipStart = "internal.renderPanic" - skipFrames = 2 - ) - start := bytes.Index(buf, []byte(skipStart)) - p := start - for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { - p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 - if p < 0 { - break - } - } - if p >= 0 { - // buf[start:p+1] is the block to remove. - // Copy buf[p+1:] over buf[start:] and shrink buf. - copy(buf[start:], buf[p+1:]) - buf = buf[:len(buf)-(p+1-start)] - } - - // Add panic heading. - head := fmt.Sprintf("panic: %v\n\n", x) - if len(head) > len(buf) { - // Extremely unlikely to happen. - return head - } - copy(buf[len(head):], buf) - copy(buf, head) - - return string(buf) -} - -var ctxs = struct { - sync.Mutex - m map[*http.Request]*context - bg *context // background context, lazily initialized - // dec is used by tests to decorate the netcontext.Context returned - // for a given request. This allows tests to add overrides (such as - // WithAppIDOverride) to the context. The map is nil outside tests. - dec map[*http.Request]func(netcontext.Context) netcontext.Context -}{ - m: make(map[*http.Request]*context), -} - -// context represents the context of an in-flight HTTP request. -// It implements the appengine.Context and http.ResponseWriter interfaces. -type context struct { - req *http.Request - - outCode int - outHeader http.Header - outBody []byte - - pendingLogs struct { - sync.Mutex - lines []*logpb.UserAppLogLine - flushes int - } - - apiURL *url.URL -} - -var contextKey = "holds a *context" - -// fromContext returns the App Engine context or nil if ctx is not -// derived from an App Engine context. -func fromContext(ctx netcontext.Context) *context { - c, _ := ctx.Value(&contextKey).(*context) - return c -} - -func withContext(parent netcontext.Context, c *context) netcontext.Context { - ctx := netcontext.WithValue(parent, &contextKey, c) - if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { - ctx = withNamespace(ctx, ns) - } - return ctx -} - -func toContext(c *context) netcontext.Context { - return withContext(netcontext.Background(), c) -} - -func IncomingHeaders(ctx netcontext.Context) http.Header { - if c := fromContext(ctx); c != nil { - return c.req.Header - } - return nil -} - -func ReqContext(req *http.Request) netcontext.Context { - return WithContext(netcontext.Background(), req) -} - -func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { - ctxs.Lock() - c := ctxs.m[req] - d := ctxs.dec[req] - ctxs.Unlock() - - if d != nil { - parent = d(parent) - } - - if c == nil { - // Someone passed in an http.Request that is not in-flight. 
- // We panic here rather than panicking at a later point - // so that stack traces will be more sensible. - log.Panic("appengine: NewContext passed an unknown http.Request") - } - return withContext(parent, c) -} - -// DefaultTicket returns a ticket used for background context or dev_appserver. -func DefaultTicket() string { - defaultTicketOnce.Do(func() { - if IsDevAppServer() { - defaultTicket = "testapp" + defaultTicketSuffix - return - } - appID := partitionlessAppID() - escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) - majVersion := VersionID(nil) - if i := strings.Index(majVersion, "."); i > 0 { - majVersion = majVersion[:i] - } - defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) - }) - return defaultTicket -} - -func BackgroundContext() netcontext.Context { - ctxs.Lock() - defer ctxs.Unlock() - - if ctxs.bg != nil { - return toContext(ctxs.bg) - } - - // Compute background security ticket. - ticket := DefaultTicket() - - ctxs.bg = &context{ - req: &http.Request{ - Header: http.Header{ - ticketHeader: []string{ticket}, - }, - }, - apiURL: apiURL(), - } - - // TODO(dsymonds): Wire up the shutdown handler to do a final flush. - go ctxs.bg.logFlusher(make(chan int)) - - return toContext(ctxs.bg) -} - -// RegisterTestRequest registers the HTTP request req for testing, such that -// any API calls are sent to the provided URL. It returns a closure to delete -// the registration. -// It should only be used by aetest package. -func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { - c := &context{ - req: req, - apiURL: apiURL, - } - ctxs.Lock() - defer ctxs.Unlock() - if _, ok := ctxs.m[req]; ok { - log.Panic("req already associated with context") - } - if _, ok := ctxs.dec[req]; ok { - log.Panic("req already associated with context") - } - if ctxs.dec == nil { - ctxs.dec = make(map[*http.Request]func(netcontext.Context) netcontext.Context) - } - ctxs.m[req] = c - ctxs.dec[req] = decorate - - return req, func() { - ctxs.Lock() - delete(ctxs.m, req) - delete(ctxs.dec, req) - ctxs.Unlock() - } -} - -var errTimeout = &CallError{ - Detail: "Deadline exceeded", - Code: int32(remotepb.RpcError_CANCELLED), - Timeout: true, -} - -func (c *context) Header() http.Header { return c.outHeader } - -// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status -// codes do not permit a response body (nor response entity headers such as -// Content-Length, Content-Type, etc). -func bodyAllowedForStatus(status int) bool { - switch { - case status >= 100 && status <= 199: - return false - case status == 204: - return false - case status == 304: - return false - } - return true -} - -func (c *context) Write(b []byte) (int, error) { - if c.outCode == 0 { - c.WriteHeader(http.StatusOK) - } - if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { - return 0, http.ErrBodyNotAllowed - } - c.outBody = append(c.outBody, b...) 
- return len(b), nil -} - -func (c *context) WriteHeader(code int) { - if c.outCode != 0 { - logf(c, 3, "WriteHeader called multiple times on request.") // error level - return - } - c.outCode = code -} - -func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { - hreq := &http.Request{ - Method: "POST", - URL: c.apiURL, - Header: http.Header{ - apiEndpointHeader: apiEndpointHeaderValue, - apiMethodHeader: apiMethodHeaderValue, - apiContentType: apiContentTypeValue, - apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, - }, - Body: ioutil.NopCloser(bytes.NewReader(body)), - ContentLength: int64(len(body)), - Host: c.apiURL.Host, - } - if info := c.req.Header.Get(dapperHeader); info != "" { - hreq.Header.Set(dapperHeader, info) - } - if info := c.req.Header.Get(traceHeader); info != "" { - hreq.Header.Set(traceHeader, info) - } - - tr := apiHTTPClient.Transport.(*http.Transport) - - var timedOut int32 // atomic; set to 1 if timed out - t := time.AfterFunc(timeout, func() { - atomic.StoreInt32(&timedOut, 1) - tr.CancelRequest(hreq) - }) - defer t.Stop() - defer func() { - // Check if timeout was exceeded. - if atomic.LoadInt32(&timedOut) != 0 { - err = errTimeout - } - }() - - hresp, err := apiHTTPClient.Do(hreq) - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - defer hresp.Body.Close() - hrespBody, err := ioutil.ReadAll(hresp.Body) - if hresp.StatusCode != 200 { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - if err != nil { - return nil, &CallError{ - Detail: fmt.Sprintf("service bridge response bad: %v", err), - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return hrespBody, nil -} - -func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { - if ns := NamespaceFromContext(ctx); ns != "" { - if fn, ok := NamespaceMods[service]; ok { - fn(in, ns) - } - } - - if f, ctx, ok := callOverrideFromContext(ctx); ok { - return f(ctx, service, method, in, out) - } - - // Handle already-done contexts quickly. - select { - case <-ctx.Done(): - return ctx.Err() - default: - } - - c := fromContext(ctx) - if c == nil { - // Give a good error message rather than a panic lower down. - return errNotAppEngineContext - } - - // Apply transaction modifications if we're in a transaction. - if t := transactionFromContext(ctx); t != nil { - if t.finished { - return errors.New("transaction context has expired") - } - applyTransaction(in, &t.transaction) - } - - // Default RPC timeout is 60s. - timeout := 60 * time.Second - if deadline, ok := ctx.Deadline(); ok { - timeout = deadline.Sub(time.Now()) - } - - data, err := proto.Marshal(in) - if err != nil { - return err - } - - ticket := c.req.Header.Get(ticketHeader) - // Use a test ticket under test environment. - if ticket == "" { - if appid := ctx.Value(&appIDOverrideKey); appid != nil { - ticket = appid.(string) + defaultTicketSuffix - } - } - // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. 
- if ticket == "" { - ticket = DefaultTicket() - } - req := &remotepb.Request{ - ServiceName: &service, - Method: &method, - Request: data, - RequestId: &ticket, - } - hreqBody, err := proto.Marshal(req) - if err != nil { - return err - } - - hrespBody, err := c.post(hreqBody, timeout) - if err != nil { - return err - } - - res := &remotepb.Response{} - if err := proto.Unmarshal(hrespBody, res); err != nil { - return err - } - if res.RpcError != nil { - ce := &CallError{ - Detail: res.RpcError.GetDetail(), - Code: *res.RpcError.Code, - } - switch remotepb.RpcError_ErrorCode(ce.Code) { - case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: - ce.Timeout = true - } - return ce - } - if res.ApplicationError != nil { - return &APIError{ - Service: *req.ServiceName, - Detail: res.ApplicationError.GetDetail(), - Code: *res.ApplicationError.Code, - } - } - if res.Exception != nil || res.JavaException != nil { - // This shouldn't happen, but let's be defensive. - return &CallError{ - Detail: "service bridge returned exception", - Code: int32(remotepb.RpcError_UNKNOWN), - } - } - return proto.Unmarshal(res.Response, out) -} - -func (c *context) Request() *http.Request { - return c.req -} - -func (c *context) addLogLine(ll *logpb.UserAppLogLine) { - // Truncate long log lines. - // TODO(dsymonds): Check if this is still necessary. - const lim = 8 << 10 - if len(*ll.Message) > lim { - suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) - ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) - } - - c.pendingLogs.Lock() - c.pendingLogs.lines = append(c.pendingLogs.lines, ll) - c.pendingLogs.Unlock() -} - -var logLevelName = map[int64]string{ - 0: "DEBUG", - 1: "INFO", - 2: "WARNING", - 3: "ERROR", - 4: "CRITICAL", -} - -func logf(c *context, level int64, format string, args ...interface{}) { - if c == nil { - panic("not an App Engine context") - } - s := fmt.Sprintf(format, args...) - s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. - c.addLogLine(&logpb.UserAppLogLine{ - TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), - Level: &level, - Message: &s, - }) - log.Print(logLevelName[level] + ": " + s) -} - -// flushLog attempts to flush any pending logs to the appserver. -// It should not be called concurrently. -func (c *context) flushLog(force bool) (flushed bool) { - c.pendingLogs.Lock() - // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. - n, rem := 0, 30<<20 - for ; n < len(c.pendingLogs.lines); n++ { - ll := c.pendingLogs.lines[n] - // Each log line will require about 3 bytes of overhead. - nb := proto.Size(ll) + 3 - if nb > rem { - break - } - rem -= nb - } - lines := c.pendingLogs.lines[:n] - c.pendingLogs.lines = c.pendingLogs.lines[n:] - c.pendingLogs.Unlock() - - if len(lines) == 0 && !force { - // Nothing to flush. - return false - } - - rescueLogs := false - defer func() { - if rescueLogs { - c.pendingLogs.Lock() - c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) 
- c.pendingLogs.Unlock() - } - }() - - buf, err := proto.Marshal(&logpb.UserAppLogGroup{ - LogLine: lines, - }) - if err != nil { - log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) - rescueLogs = true - return false - } - - req := &logpb.FlushRequest{ - Logs: buf, - } - res := &basepb.VoidProto{} - c.pendingLogs.Lock() - c.pendingLogs.flushes++ - c.pendingLogs.Unlock() - if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { - log.Printf("internal.flushLog: Flush RPC: %v", err) - rescueLogs = true - return false - } - return true -} - -const ( - // Log flushing parameters. - flushInterval = 1 * time.Second - forceFlushInterval = 60 * time.Second -) - -func (c *context) logFlusher(stop <-chan int) { - lastFlush := time.Now() - tick := time.NewTicker(flushInterval) - for { - select { - case <-stop: - // Request finished. - tick.Stop() - return - case <-tick.C: - force := time.Now().Sub(lastFlush) > forceFlushInterval - if c.flushLog(force) { - lastFlush = time.Now() - } - } - } -} - -func ContextForTesting(req *http.Request) netcontext.Context { - return toContext(&context{req: req}) -} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go index d538701ab3b2b..9b4134e425732 100644 --- a/vendor/google.golang.org/appengine/internal/identity.go +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -4,11 +4,52 @@ package internal -import netcontext "golang.org/x/net/context" +import ( + "os" -// These functions are implementations of the wrapper functions -// in ../appengine/identity.go. See that file for commentary. + netcontext "golang.org/x/net/context" +) +var ( + // This is set to true in identity_classic.go, which is behind the appengine build tag. + // The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not + // the second generation runtimes (>= Go 1.11), so this indicates whether we're on a + // first-gen runtime. See IsStandard below for the second-gen check. + appengineStandard bool + + // This is set to true in identity_flex.go, which is behind the appenginevm build tag. + appengineFlex bool +) + +// AppID is the implementation of the wrapper function of the same name in +// ../identity.go. See that file for commentary. func AppID(c netcontext.Context) string { return appID(FullyQualifiedAppID(c)) } + +// IsStandard is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. +func IsStandard() bool { + // appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not + // second-gen (>= Go 1.11). + return appengineStandard || IsSecondGen() +} + +// IsStandard is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. +func IsSecondGen() bool { + // Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime. + return os.Getenv("GAE_ENV") == "standard" +} + +// IsFlex is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. +func IsFlex() bool { + return appengineFlex +} + +// IsAppEngine is the implementation of the wrapper function of the same name in +// ../appengine.go. See that file for commentary. 
+func IsAppEngine() bool { + return IsStandard() || IsFlex() +} diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go index b59603f13293e..4e979f45e34d5 100644 --- a/vendor/google.golang.org/appengine/internal/identity_classic.go +++ b/vendor/google.golang.org/appengine/internal/identity_classic.go @@ -12,6 +12,10 @@ import ( netcontext "golang.org/x/net/context" ) +func init() { + appengineStandard = true +} + func DefaultVersionHostname(ctx netcontext.Context) string { c := fromContext(ctx) if c == nil { diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go new file mode 100644 index 0000000000000..d5e2e7b5e3f8c --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -0,0 +1,11 @@ +// Copyright 2018 Google LLC. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appenginevm + +package internal + +func init() { + appengineFlex = true +} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go index 49036163c2b0a..1e765312fd180 100644 --- a/vendor/google.golang.org/appengine/internal/main.go +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -11,5 +11,6 @@ import ( ) func Main() { + MainPath = "" appengine_internal.Main() } diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go new file mode 100644 index 0000000000000..357dce4dd0129 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_common.go @@ -0,0 +1,7 @@ +package internal + +// MainPath stores the file path of the main package. On App Engine Standard +// using Go version 1.9 and below, this will be unset. On App Engine Flex and +// App Engine Standard second-gen (Go 1.11 and above), this will be the +// filepath to package main. +var MainPath string diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go index 822e784a458d9..ddb79a333879a 100644 --- a/vendor/google.golang.org/appengine/internal/main_vm.go +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -12,9 +12,12 @@ import ( "net/http" "net/url" "os" + "path/filepath" + "runtime" ) func Main() { + MainPath = filepath.Dir(findMainPath()) installHealthChecker(http.DefaultServeMux) port := "8080" @@ -31,6 +34,24 @@ func Main() { } } +// Find the path to package main by looking at the root Caller. +func findMainPath() string { + pc := make([]uintptr, 100) + n := runtime.Callers(2, pc) + frames := runtime.CallersFrames(pc[:n]) + for { + frame, more := frames.Next() + // Tests won't have package main, instead they have testing.tRunner + if frame.Function == "main.main" || frame.Function == "testing.tRunner" { + return frame.File + } + if !more { + break + } + } + return "" +} + func installHealthChecker(mux *http.ServeMux) { // If no health check handler has been installed by this point, add a trivial one. 
const healthPath = "/_ah/health" diff --git a/vendor/modules.txt b/vendor/modules.txt index e7f91d5bc2d12..2b8a569cba564 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -85,7 +85,7 @@ github.com/couchbase/vellum/utf8 github.com/couchbaselabs/go-couchbase # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/denisenkom/go-mssqldb v0.0.0-20181014144952-4e0d7dc8888f => github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 +# github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 => github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 github.com/denisenkom/go-mssqldb # github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac github.com/dgrijalva/jwt-go @@ -147,13 +147,9 @@ github.com/go-redis/redis/internal/hashtag github.com/go-redis/redis/internal/pool github.com/go-redis/redis/internal/proto github.com/go-redis/redis/internal/util -# github.com/go-sql-driver/mysql v1.4.0 => github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f +# github.com/go-sql-driver/mysql v1.4.1 => github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f github.com/go-sql-driver/mysql -# github.com/go-xorm/builder v0.3.3 -github.com/go-xorm/builder -# github.com/go-xorm/core v0.6.0 -github.com/go-xorm/core -# github.com/go-xorm/xorm v0.0.0-20190116032649-a6300f2a45e0 +# github.com/go-xorm/xorm v0.7.3-0.20190620151208-f1b4f8368459 github.com/go-xorm/xorm # github.com/gogits/chardet v0.0.0-20150115103509-2404f7772561 github.com/gogits/chardet @@ -396,7 +392,7 @@ golang.org/x/text/internal/language/compact golang.org/x/text/internal/utf8internal golang.org/x/text/runes golang.org/x/text/internal/tag -# google.golang.org/appengine v1.2.0 +# google.golang.org/appengine v1.4.0 google.golang.org/appengine/cloudsql google.golang.org/appengine/urlfetch google.golang.org/appengine/internal @@ -481,3 +477,7 @@ gopkg.in/yaml.v2 mvdan.cc/xurls/v2 # strk.kbt.io/projects/go/libravatar v0.0.0-20160628055650-5eed7bff870a strk.kbt.io/projects/go/libravatar +# xorm.io/builder v0.3.5 +xorm.io/builder +# xorm.io/core v0.6.3 +xorm.io/core diff --git a/vendor/github.com/go-xorm/builder/.drone.yml b/vendor/xorm.io/builder/.drone.yml similarity index 100% rename from vendor/github.com/go-xorm/builder/.drone.yml rename to vendor/xorm.io/builder/.drone.yml diff --git a/vendor/github.com/go-xorm/builder/LICENSE b/vendor/xorm.io/builder/LICENSE similarity index 100% rename from vendor/github.com/go-xorm/builder/LICENSE rename to vendor/xorm.io/builder/LICENSE diff --git a/vendor/github.com/go-xorm/builder/README.md b/vendor/xorm.io/builder/README.md similarity index 100% rename from vendor/github.com/go-xorm/builder/README.md rename to vendor/xorm.io/builder/README.md diff --git a/vendor/github.com/go-xorm/builder/builder.go b/vendor/xorm.io/builder/builder.go similarity index 100% rename from vendor/github.com/go-xorm/builder/builder.go rename to vendor/xorm.io/builder/builder.go diff --git a/vendor/github.com/go-xorm/builder/builder_delete.go b/vendor/xorm.io/builder/builder_delete.go similarity index 100% rename from vendor/github.com/go-xorm/builder/builder_delete.go rename to vendor/xorm.io/builder/builder_delete.go diff --git a/vendor/github.com/go-xorm/builder/builder_insert.go b/vendor/xorm.io/builder/builder_insert.go similarity index 
100% rename from vendor/github.com/go-xorm/builder/builder_insert.go rename to vendor/xorm.io/builder/builder_insert.go diff --git a/vendor/github.com/go-xorm/builder/builder_limit.go b/vendor/xorm.io/builder/builder_limit.go similarity index 100% rename from vendor/github.com/go-xorm/builder/builder_limit.go rename to vendor/xorm.io/builder/builder_limit.go diff --git a/vendor/github.com/go-xorm/builder/builder_select.go b/vendor/xorm.io/builder/builder_select.go similarity index 100% rename from vendor/github.com/go-xorm/builder/builder_select.go rename to vendor/xorm.io/builder/builder_select.go diff --git a/vendor/github.com/go-xorm/builder/builder_union.go b/vendor/xorm.io/builder/builder_union.go similarity index 100% rename from vendor/github.com/go-xorm/builder/builder_union.go rename to vendor/xorm.io/builder/builder_union.go diff --git a/vendor/github.com/go-xorm/builder/builder_update.go b/vendor/xorm.io/builder/builder_update.go similarity index 100% rename from vendor/github.com/go-xorm/builder/builder_update.go rename to vendor/xorm.io/builder/builder_update.go diff --git a/vendor/github.com/go-xorm/builder/cond.go b/vendor/xorm.io/builder/cond.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond.go rename to vendor/xorm.io/builder/cond.go diff --git a/vendor/github.com/go-xorm/builder/cond_and.go b/vendor/xorm.io/builder/cond_and.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_and.go rename to vendor/xorm.io/builder/cond_and.go diff --git a/vendor/github.com/go-xorm/builder/cond_between.go b/vendor/xorm.io/builder/cond_between.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_between.go rename to vendor/xorm.io/builder/cond_between.go diff --git a/vendor/github.com/go-xorm/builder/cond_compare.go b/vendor/xorm.io/builder/cond_compare.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_compare.go rename to vendor/xorm.io/builder/cond_compare.go diff --git a/vendor/github.com/go-xorm/builder/cond_eq.go b/vendor/xorm.io/builder/cond_eq.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_eq.go rename to vendor/xorm.io/builder/cond_eq.go diff --git a/vendor/github.com/go-xorm/builder/cond_expr.go b/vendor/xorm.io/builder/cond_expr.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_expr.go rename to vendor/xorm.io/builder/cond_expr.go diff --git a/vendor/xorm.io/builder/cond_if.go b/vendor/xorm.io/builder/cond_if.go new file mode 100644 index 0000000000000..af9eb321fda98 --- /dev/null +++ b/vendor/xorm.io/builder/cond_if.go @@ -0,0 +1,49 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package builder + +type condIf struct { + condition bool + condTrue Cond + condFalse Cond +} + +var _ Cond = condIf{} + +// If returns Cond via condition +func If(condition bool, condTrue Cond, condFalse ...Cond) Cond { + var c = condIf{ + condition: condition, + condTrue: condTrue, + } + if len(condFalse) > 0 { + c.condFalse = condFalse[0] + } + return c +} + +func (condIf condIf) WriteTo(w Writer) error { + if condIf.condition { + return condIf.condTrue.WriteTo(w) + } else if condIf.condFalse != nil { + return condIf.condFalse.WriteTo(w) + } + return nil +} + +func (condIf condIf) And(conds ...Cond) Cond { + return And(condIf, And(conds...)) +} + +func (condIf condIf) Or(conds ...Cond) Cond { + return Or(condIf, Or(conds...)) +} + +func (condIf condIf) IsValid() bool { + if condIf.condition { + return condIf.condTrue != nil + } + return condIf.condFalse != nil +} diff --git a/vendor/github.com/go-xorm/builder/cond_in.go b/vendor/xorm.io/builder/cond_in.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_in.go rename to vendor/xorm.io/builder/cond_in.go diff --git a/vendor/github.com/go-xorm/builder/cond_like.go b/vendor/xorm.io/builder/cond_like.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_like.go rename to vendor/xorm.io/builder/cond_like.go diff --git a/vendor/github.com/go-xorm/builder/cond_neq.go b/vendor/xorm.io/builder/cond_neq.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_neq.go rename to vendor/xorm.io/builder/cond_neq.go diff --git a/vendor/github.com/go-xorm/builder/cond_not.go b/vendor/xorm.io/builder/cond_not.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_not.go rename to vendor/xorm.io/builder/cond_not.go diff --git a/vendor/github.com/go-xorm/builder/cond_notin.go b/vendor/xorm.io/builder/cond_notin.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_notin.go rename to vendor/xorm.io/builder/cond_notin.go diff --git a/vendor/github.com/go-xorm/builder/cond_null.go b/vendor/xorm.io/builder/cond_null.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_null.go rename to vendor/xorm.io/builder/cond_null.go diff --git a/vendor/github.com/go-xorm/builder/cond_or.go b/vendor/xorm.io/builder/cond_or.go similarity index 100% rename from vendor/github.com/go-xorm/builder/cond_or.go rename to vendor/xorm.io/builder/cond_or.go diff --git a/vendor/github.com/go-xorm/builder/doc.go b/vendor/xorm.io/builder/doc.go similarity index 100% rename from vendor/github.com/go-xorm/builder/doc.go rename to vendor/xorm.io/builder/doc.go diff --git a/vendor/github.com/go-xorm/builder/error.go b/vendor/xorm.io/builder/error.go similarity index 100% rename from vendor/github.com/go-xorm/builder/error.go rename to vendor/xorm.io/builder/error.go diff --git a/vendor/xorm.io/builder/go.mod b/vendor/xorm.io/builder/go.mod new file mode 100644 index 0000000000000..35e43b329ff23 --- /dev/null +++ b/vendor/xorm.io/builder/go.mod @@ -0,0 +1,6 @@ +module xorm.io/builder + +require ( + github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/xorm.io/builder/go.sum b/vendor/xorm.io/builder/go.sum new file mode 100644 index 0000000000000..468ba4a2d566f --- /dev/null +++ b/vendor/xorm.io/builder/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.0 
h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y= +github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:56xuuqnHyryaerycW3BfssRdxQstACi0Epw/yC5E2xM= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/go-xorm/builder/sql.go b/vendor/xorm.io/builder/sql.go similarity index 100% rename from vendor/github.com/go-xorm/builder/sql.go rename to vendor/xorm.io/builder/sql.go diff --git a/vendor/github.com/go-xorm/builder/string_builder.go b/vendor/xorm.io/builder/string_builder.go similarity index 100% rename from vendor/github.com/go-xorm/builder/string_builder.go rename to vendor/xorm.io/builder/string_builder.go diff --git a/vendor/github.com/go-xorm/core/.gitignore b/vendor/xorm.io/core/.gitignore similarity index 100% rename from vendor/github.com/go-xorm/core/.gitignore rename to vendor/xorm.io/core/.gitignore diff --git a/vendor/github.com/go-xorm/core/LICENSE b/vendor/xorm.io/core/LICENSE similarity index 100% rename from vendor/github.com/go-xorm/core/LICENSE rename to vendor/xorm.io/core/LICENSE diff --git a/vendor/github.com/go-xorm/core/README.md b/vendor/xorm.io/core/README.md similarity index 100% rename from vendor/github.com/go-xorm/core/README.md rename to vendor/xorm.io/core/README.md diff --git a/vendor/github.com/go-xorm/core/benchmark.sh b/vendor/xorm.io/core/benchmark.sh similarity index 100% rename from vendor/github.com/go-xorm/core/benchmark.sh rename to vendor/xorm.io/core/benchmark.sh diff --git a/vendor/github.com/go-xorm/core/cache.go b/vendor/xorm.io/core/cache.go similarity index 92% rename from vendor/github.com/go-xorm/core/cache.go rename to vendor/xorm.io/core/cache.go index 8f9531da94012..dc4992dfb112a 100644 --- a/vendor/github.com/go-xorm/core/cache.go +++ b/vendor/xorm.io/core/cache.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/github.com/go-xorm/core/column.go b/vendor/xorm.io/core/column.go similarity index 88% rename from vendor/github.com/go-xorm/core/column.go rename to vendor/xorm.io/core/column.go index 20912b713c419..40d8f9268d727 100644 --- a/vendor/github.com/go-xorm/core/column.go +++ b/vendor/xorm.io/core/column.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package core import ( @@ -41,6 +45,7 @@ type Column struct { Comment string } +// NewColumn creates a new column func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int, nullable bool) *Column { return &Column{ Name: name, @@ -66,7 +71,7 @@ func NewColumn(name, fieldName string, sqlType SQLType, len1, len2 int, nullable } } -// generate column description string according dialect +// String generate column description string according dialect func (col *Column) String(d Dialect) string { sql := d.QuoteStr() + col.Name + d.QuoteStr() + " " @@ -94,6 +99,7 @@ func (col *Column) String(d Dialect) string { return sql } +// StringNoPk generate column description string according dialect without primary keys func (col *Column) StringNoPk(d Dialect) string { sql := d.QuoteStr() + col.Name + d.QuoteStr() + " " @@ -114,12 +120,13 @@ func (col *Column) StringNoPk(d Dialect) string { return sql } -// return col's filed of struct's value +// ValueOf returns column's filed of struct's value func (col *Column) ValueOf(bean interface{}) (*reflect.Value, error) { dataStruct := reflect.Indirect(reflect.ValueOf(bean)) return col.ValueOfV(&dataStruct) } +// ValueOfV returns column's filed of struct's value accept reflevt value func (col *Column) ValueOfV(dataStruct *reflect.Value) (*reflect.Value, error) { var fieldValue reflect.Value fieldPath := strings.Split(col.FieldName, ".") diff --git a/vendor/github.com/go-xorm/core/converstion.go b/vendor/xorm.io/core/converstion.go similarity index 59% rename from vendor/github.com/go-xorm/core/converstion.go rename to vendor/xorm.io/core/converstion.go index 18522fbeebd9f..9703c36e08561 100644 --- a/vendor/github.com/go-xorm/core/converstion.go +++ b/vendor/xorm.io/core/converstion.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core // Conversion is an interface. A type implements Conversion will according diff --git a/vendor/xorm.io/core/db.go b/vendor/xorm.io/core/db.go new file mode 100644 index 0000000000000..3e50a14795da9 --- /dev/null +++ b/vendor/xorm.io/core/db.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "regexp" + "sync" +) + +var ( + DefaultCacheSize = 200 +) + +func MapToSlice(query string, mp interface{}) (string, []interface{}, error) { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return "", []interface{}{}, ErrNoMapPointer + } + + args := make([]interface{}, 0, len(vv.Elem().MapKeys())) + var err error + query = re.ReplaceAllStringFunc(query, func(src string) string { + v := vv.Elem().MapIndex(reflect.ValueOf(src[1:])) + if !v.IsValid() { + err = fmt.Errorf("map key %s is missing", src[1:]) + } else { + args = append(args, v.Interface()) + } + return "?" 
+ }) + + return query, args, err +} + +func StructToSlice(query string, st interface{}) (string, []interface{}, error) { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return "", []interface{}{}, ErrNoStructPointer + } + + args := make([]interface{}, 0) + var err error + query = re.ReplaceAllStringFunc(query, func(src string) string { + fv := vv.Elem().FieldByName(src[1:]).Interface() + if v, ok := fv.(driver.Valuer); ok { + var value driver.Value + value, err = v.Value() + if err != nil { + return "?" + } + args = append(args, value) + } else { + args = append(args, fv) + } + return "?" + }) + if err != nil { + return "", []interface{}{}, err + } + return query, args, nil +} + +type cacheStruct struct { + value reflect.Value + idx int +} + +// DB is a wrap of sql.DB with extra contents +type DB struct { + *sql.DB + Mapper IMapper + reflectCache map[reflect.Type]*cacheStruct + reflectCacheMutex sync.RWMutex +} + +// Open opens a database +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{ + DB: db, + Mapper: NewCacheMapper(&SnakeMapper{}), + reflectCache: make(map[reflect.Type]*cacheStruct), + }, nil +} + +// FromDB creates a DB from a sql.DB +func FromDB(db *sql.DB) *DB { + return &DB{ + DB: db, + Mapper: NewCacheMapper(&SnakeMapper{}), + reflectCache: make(map[reflect.Type]*cacheStruct), + } +} + +func (db *DB) reflectNew(typ reflect.Type) reflect.Value { + db.reflectCacheMutex.Lock() + defer db.reflectCacheMutex.Unlock() + cs, ok := db.reflectCache[typ] + if !ok || cs.idx+1 > DefaultCacheSize-1 { + cs = &cacheStruct{reflect.MakeSlice(reflect.SliceOf(typ), DefaultCacheSize, DefaultCacheSize), 0} + db.reflectCache[typ] = cs + } else { + cs.idx = cs.idx + 1 + } + return cs.value.Index(cs.idx).Addr() +} + +// QueryContext overwrites sql.DB.QueryContext +func (db *DB) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + rows, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + if rows != nil { + rows.Close() + } + return nil, err + } + return &Rows{rows, db}, nil +} + +// Query overwrites sql.DB.Query +func (db *DB) Query(query string, args ...interface{}) (*Rows, error) { + return db.QueryContext(context.Background(), query, args...) +} + +func (db *DB) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return db.QueryContext(ctx, query, args...) +} + +func (db *DB) QueryMap(query string, mp interface{}) (*Rows, error) { + return db.QueryMapContext(context.Background(), query, mp) +} + +func (db *DB) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return db.QueryContext(ctx, query, args...) +} + +func (db *DB) QueryStruct(query string, st interface{}) (*Rows, error) { + return db.QueryStructContext(context.Background(), query, st) +} + +func (db *DB) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.QueryContext(ctx, query, args...) + if err != nil { + return &Row{nil, err} + } + return &Row{rows, nil} +} + +func (db *DB) QueryRow(query string, args ...interface{}) *Row { + return db.QueryRowContext(context.Background(), query, args...) 
+} + +func (db *DB) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { + query, args, err := MapToSlice(query, mp) + if err != nil { + return &Row{nil, err} + } + return db.QueryRowContext(ctx, query, args...) +} + +func (db *DB) QueryRowMap(query string, mp interface{}) *Row { + return db.QueryRowMapContext(context.Background(), query, mp) +} + +func (db *DB) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { + query, args, err := StructToSlice(query, st) + if err != nil { + return &Row{nil, err} + } + return db.QueryRowContext(ctx, query, args...) +} + +func (db *DB) QueryRowStruct(query string, st interface{}) *Row { + return db.QueryRowStructContext(context.Background(), query, st) +} + +var ( + re = regexp.MustCompile(`[?](\w+)`) +) + +// insert into (name) values (?) +// insert into (name) values (?name) +func (db *DB) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return db.DB.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecMap(query string, mp interface{}) (sql.Result, error) { + return db.ExecMapContext(context.Background(), query, mp) +} + +func (db *DB) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return db.DB.ExecContext(ctx, query, args...) +} + +func (db *DB) ExecStruct(query string, st interface{}) (sql.Result, error) { + return db.ExecStructContext(context.Background(), query, st) +} diff --git a/vendor/github.com/go-xorm/core/dialect.go b/vendor/xorm.io/core/dialect.go similarity index 97% rename from vendor/github.com/go-xorm/core/dialect.go rename to vendor/xorm.io/core/dialect.go index c288a0847837f..5d35a4f11d980 100644 --- a/vendor/github.com/go-xorm/core/dialect.go +++ b/vendor/xorm.io/core/dialect.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/github.com/go-xorm/core/driver.go b/vendor/xorm.io/core/driver.go similarity index 75% rename from vendor/github.com/go-xorm/core/driver.go rename to vendor/xorm.io/core/driver.go index 0f1020b403ba8..ceef4ba61822e 100644 --- a/vendor/github.com/go-xorm/core/driver.go +++ b/vendor/xorm.io/core/driver.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core type Driver interface { diff --git a/vendor/github.com/go-xorm/core/error.go b/vendor/xorm.io/core/error.go similarity index 51% rename from vendor/github.com/go-xorm/core/error.go rename to vendor/xorm.io/core/error.go index 640e6036e6681..63ea53e466c11 100644 --- a/vendor/github.com/go-xorm/core/error.go +++ b/vendor/xorm.io/core/error.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
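
The new core/db.go shown above wraps database/sql with named-parameter helpers: MapToSlice and StructToSlice rewrite `?name` placeholders into positional `?` arguments, and the QueryMap/ExecMap family accepts a pointer to a map (or struct) to bind them. The following is a minimal usage sketch, not part of this patch; the `xorm_test.db` path, the `user(id, name)` table and the go-sqlite3 driver are illustrative assumptions:

    package main

    import (
    	"fmt"
    	"log"

    	_ "github.com/mattn/go-sqlite3" // registers the "sqlite3" driver (assumed for the example)
    	"xorm.io/core"
    )

    func main() {
    	db, err := core.Open("sqlite3", "./xorm_test.db")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer db.Close()

    	// ?name placeholders are rewritten to ? and bound from the map;
    	// note the map must be passed by pointer.
    	_, err = db.ExecMap("INSERT INTO user (id, name) VALUES (?id, ?name)",
    		&map[string]interface{}{"id": 1, "name": "lunny"})
    	if err != nil {
    		log.Fatal(err)
    	}

    	rows, err := db.QueryMap("SELECT name FROM user WHERE id=?id",
    		&map[string]interface{}{"id": 1})
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer rows.Close()
    	for rows.Next() {
    		var name string
    		if err := rows.Scan(&name); err != nil {
    			log.Fatal(err)
    		}
    		fmt.Println(name)
    	}
    }
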
+ package core import "errors" diff --git a/vendor/github.com/go-xorm/core/filter.go b/vendor/xorm.io/core/filter.go similarity index 90% rename from vendor/github.com/go-xorm/core/filter.go rename to vendor/xorm.io/core/filter.go index 35b0ece676484..6aeed4244c1ea 100644 --- a/vendor/github.com/go-xorm/core/filter.go +++ b/vendor/xorm.io/core/filter.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/xorm.io/core/go.mod b/vendor/xorm.io/core/go.mod new file mode 100644 index 0000000000000..2703545e69bec --- /dev/null +++ b/vendor/xorm.io/core/go.mod @@ -0,0 +1,7 @@ +module xorm.io/core + +require ( + github.com/go-sql-driver/mysql v1.4.1 + github.com/mattn/go-sqlite3 v1.10.0 + google.golang.org/appengine v1.4.0 // indirect +) diff --git a/vendor/xorm.io/core/go.sum b/vendor/xorm.io/core/go.sum new file mode 100644 index 0000000000000..8f20f8bc9017f --- /dev/null +++ b/vendor/xorm.io/core/go.sum @@ -0,0 +1,9 @@ +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/mattn/go-sqlite3 v1.10.0 h1:jbhqpg7tQe4SupckyijYiy0mJJ/pRyHvXf7JdWK860o= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/github.com/go-xorm/core/ilogger.go b/vendor/xorm.io/core/ilogger.go similarity index 78% rename from vendor/github.com/go-xorm/core/ilogger.go rename to vendor/xorm.io/core/ilogger.go index c8d78496054c7..348ab88f4f0f0 100644 --- a/vendor/github.com/go-xorm/core/ilogger.go +++ b/vendor/xorm.io/core/ilogger.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core type LogLevel int diff --git a/vendor/github.com/go-xorm/core/index.go b/vendor/xorm.io/core/index.go similarity index 78% rename from vendor/github.com/go-xorm/core/index.go rename to vendor/xorm.io/core/index.go index 9aa1b7ac99ba3..ac97b6850530f 100644 --- a/vendor/github.com/go-xorm/core/index.go +++ b/vendor/xorm.io/core/index.go @@ -1,8 +1,11 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package core import ( "fmt" - "sort" "strings" ) @@ -46,11 +49,16 @@ func (index *Index) Equal(dst *Index) bool { if len(index.Cols) != len(dst.Cols) { return false } - sort.StringSlice(index.Cols).Sort() - sort.StringSlice(dst.Cols).Sort() for i := 0; i < len(index.Cols); i++ { - if index.Cols[i] != dst.Cols[i] { + var found bool + for j := 0; j < len(dst.Cols); j++ { + if index.Cols[i] == dst.Cols[j] { + found = true + break + } + } + if !found { return false } } diff --git a/vendor/github.com/go-xorm/core/mapper.go b/vendor/xorm.io/core/mapper.go similarity index 96% rename from vendor/github.com/go-xorm/core/mapper.go rename to vendor/xorm.io/core/mapper.go index bb72a156624ee..ec44ea0db9b74 100644 --- a/vendor/github.com/go-xorm/core/mapper.go +++ b/vendor/xorm.io/core/mapper.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/github.com/go-xorm/core/pk.go b/vendor/xorm.io/core/pk.go similarity index 72% rename from vendor/github.com/go-xorm/core/pk.go rename to vendor/xorm.io/core/pk.go index 1810dd944be44..05a7672d86b35 100644 --- a/vendor/github.com/go-xorm/core/pk.go +++ b/vendor/xorm.io/core/pk.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/github.com/go-xorm/core/rows.go b/vendor/xorm.io/core/rows.go similarity index 97% rename from vendor/github.com/go-xorm/core/rows.go rename to vendor/xorm.io/core/rows.go index 580de4f9c669c..2b046d84cc7d4 100644 --- a/vendor/github.com/go-xorm/core/rows.go +++ b/vendor/xorm.io/core/rows.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/github.com/go-xorm/core/scan.go b/vendor/xorm.io/core/scan.go similarity index 79% rename from vendor/github.com/go-xorm/core/scan.go rename to vendor/xorm.io/core/scan.go index b7c159b2740c1..897b534159ed1 100644 --- a/vendor/github.com/go-xorm/core/scan.go +++ b/vendor/xorm.io/core/scan.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( @@ -53,3 +57,10 @@ func convertTime(dest *NullTime, src interface{}) error { } return nil } + +type EmptyScanner struct { +} + +func (EmptyScanner) Scan(src interface{}) error { + return nil +} diff --git a/vendor/xorm.io/core/stmt.go b/vendor/xorm.io/core/stmt.go new file mode 100644 index 0000000000000..20ee202b9b743 --- /dev/null +++ b/vendor/xorm.io/core/stmt.go @@ -0,0 +1,165 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" + "errors" + "reflect" +) + +type Stmt struct { + *sql.Stmt + db *DB + names map[string]int +} + +func (db *DB) PrepareContext(ctx context.Context, query string) (*Stmt, error) { + names := make(map[string]int) + var i int + query = re.ReplaceAllStringFunc(query, func(src string) string { + names[src[1:]] = i + i += 1 + return "?" 
+ }) + + stmt, err := db.DB.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{stmt, db, names}, nil +} + +func (db *DB) Prepare(query string) (*Stmt, error) { + return db.PrepareContext(context.Background(), query) +} + +func (s *Stmt) ExecMapContext(ctx context.Context, mp interface{}) (sql.Result, error) { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() + } + return s.Stmt.ExecContext(ctx, args...) +} + +func (s *Stmt) ExecMap(mp interface{}) (sql.Result, error) { + return s.ExecMapContext(context.Background(), mp) +} + +func (s *Stmt) ExecStructContext(ctx context.Context, st interface{}) (sql.Result, error) { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().FieldByName(k).Interface() + } + return s.Stmt.ExecContext(ctx, args...) +} + +func (s *Stmt) ExecStruct(st interface{}) (sql.Result, error) { + return s.ExecStructContext(context.Background(), st) +} + +func (s *Stmt) QueryContext(ctx context.Context, args ...interface{}) (*Rows, error) { + rows, err := s.Stmt.QueryContext(ctx, args...) + if err != nil { + return nil, err + } + return &Rows{rows, s.db}, nil +} + +func (s *Stmt) Query(args ...interface{}) (*Rows, error) { + return s.QueryContext(context.Background(), args...) +} + +func (s *Stmt) QueryMapContext(ctx context.Context, mp interface{}) (*Rows, error) { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() + } + + return s.QueryContext(ctx, args...) +} + +func (s *Stmt) QueryMap(mp interface{}) (*Rows, error) { + return s.QueryMapContext(context.Background(), mp) +} + +func (s *Stmt) QueryStructContext(ctx context.Context, st interface{}) (*Rows, error) { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return nil, errors.New("mp should be a map's pointer") + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().FieldByName(k).Interface() + } + + return s.Query(args...) +} + +func (s *Stmt) QueryStruct(st interface{}) (*Rows, error) { + return s.QueryStructContext(context.Background(), st) +} + +func (s *Stmt) QueryRowContext(ctx context.Context, args ...interface{}) *Row { + rows, err := s.QueryContext(ctx, args...) + return &Row{rows, err} +} + +func (s *Stmt) QueryRow(args ...interface{}) *Row { + return s.QueryRowContext(context.Background(), args...) +} + +func (s *Stmt) QueryRowMapContext(ctx context.Context, mp interface{}) *Row { + vv := reflect.ValueOf(mp) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Map { + return &Row{nil, errors.New("mp should be a map's pointer")} + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().MapIndex(reflect.ValueOf(k)).Interface() + } + + return s.QueryRowContext(ctx, args...) 
+} + +func (s *Stmt) QueryRowMap(mp interface{}) *Row { + return s.QueryRowMapContext(context.Background(), mp) +} + +func (s *Stmt) QueryRowStructContext(ctx context.Context, st interface{}) *Row { + vv := reflect.ValueOf(st) + if vv.Kind() != reflect.Ptr || vv.Elem().Kind() != reflect.Struct { + return &Row{nil, errors.New("st should be a struct's pointer")} + } + + args := make([]interface{}, len(s.names)) + for k, i := range s.names { + args[i] = vv.Elem().FieldByName(k).Interface() + } + + return s.QueryRowContext(ctx, args...) +} + +func (s *Stmt) QueryRowStruct(st interface{}) *Row { + return s.QueryRowStructContext(context.Background(), st) +} diff --git a/vendor/github.com/go-xorm/core/table.go b/vendor/xorm.io/core/table.go similarity index 95% rename from vendor/github.com/go-xorm/core/table.go rename to vendor/xorm.io/core/table.go index b5d079404c8eb..d129e60f8b914 100644 --- a/vendor/github.com/go-xorm/core/table.go +++ b/vendor/xorm.io/core/table.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package core import ( diff --git a/vendor/xorm.io/core/tx.go b/vendor/xorm.io/core/tx.go new file mode 100644 index 0000000000000..a56b70063eb9f --- /dev/null +++ b/vendor/xorm.io/core/tx.go @@ -0,0 +1,153 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package core + +import ( + "context" + "database/sql" +) + +type Tx struct { + *sql.Tx + db *DB +} + +func (db *DB) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{tx, db}, nil +} + +func (db *DB) Begin() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{tx, db}, nil +} + +func (tx *Tx) PrepareContext(ctx context.Context, query string) (*Stmt, error) { + names := make(map[string]int) + var i int + query = re.ReplaceAllStringFunc(query, func(src string) string { + names[src[1:]] = i + i += 1 + return "?" + }) + + stmt, err := tx.Tx.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{stmt, tx.db, names}, nil +} + +func (tx *Tx) Prepare(query string) (*Stmt, error) { + return tx.PrepareContext(context.Background(), query) +} + +func (tx *Tx) StmtContext(ctx context.Context, stmt *Stmt) *Stmt { + stmt.Stmt = tx.Tx.StmtContext(ctx, stmt.Stmt) + return stmt +} + +func (tx *Tx) Stmt(stmt *Stmt) *Stmt { + return tx.StmtContext(context.Background(), stmt) +} + +func (tx *Tx) ExecMapContext(ctx context.Context, query string, mp interface{}) (sql.Result, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return tx.Tx.ExecContext(ctx, query, args...) +} + +func (tx *Tx) ExecMap(query string, mp interface{}) (sql.Result, error) { + return tx.ExecMapContext(context.Background(), query, mp) +} + +func (tx *Tx) ExecStructContext(ctx context.Context, query string, st interface{}) (sql.Result, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return tx.Tx.ExecContext(ctx, query, args...) 
+} + +func (tx *Tx) ExecStruct(query string, st interface{}) (sql.Result, error) { + return tx.ExecStructContext(context.Background(), query, st) +} + +func (tx *Tx) QueryContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{rows, tx.db}, nil +} + +func (tx *Tx) Query(query string, args ...interface{}) (*Rows, error) { + return tx.QueryContext(context.Background(), query, args...) +} + +func (tx *Tx) QueryMapContext(ctx context.Context, query string, mp interface{}) (*Rows, error) { + query, args, err := MapToSlice(query, mp) + if err != nil { + return nil, err + } + return tx.QueryContext(ctx, query, args...) +} + +func (tx *Tx) QueryMap(query string, mp interface{}) (*Rows, error) { + return tx.QueryMapContext(context.Background(), query, mp) +} + +func (tx *Tx) QueryStructContext(ctx context.Context, query string, st interface{}) (*Rows, error) { + query, args, err := StructToSlice(query, st) + if err != nil { + return nil, err + } + return tx.QueryContext(ctx, query, args...) +} + +func (tx *Tx) QueryStruct(query string, st interface{}) (*Rows, error) { + return tx.QueryStructContext(context.Background(), query, st) +} + +func (tx *Tx) QueryRowContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.QueryContext(ctx, query, args...) + return &Row{rows, err} +} + +func (tx *Tx) QueryRow(query string, args ...interface{}) *Row { + return tx.QueryRowContext(context.Background(), query, args...) +} + +func (tx *Tx) QueryRowMapContext(ctx context.Context, query string, mp interface{}) *Row { + query, args, err := MapToSlice(query, mp) + if err != nil { + return &Row{nil, err} + } + return tx.QueryRowContext(ctx, query, args...) +} + +func (tx *Tx) QueryRowMap(query string, mp interface{}) *Row { + return tx.QueryRowMapContext(context.Background(), query, mp) +} + +func (tx *Tx) QueryRowStructContext(ctx context.Context, query string, st interface{}) *Row { + query, args, err := StructToSlice(query, st) + if err != nil { + return &Row{nil, err} + } + return tx.QueryRowContext(ctx, query, args...) +} + +func (tx *Tx) QueryRowStruct(query string, st interface{}) *Row { + return tx.QueryRowStructContext(context.Background(), query, st) +} diff --git a/vendor/github.com/go-xorm/core/type.go b/vendor/xorm.io/core/type.go similarity index 92% rename from vendor/github.com/go-xorm/core/type.go rename to vendor/xorm.io/core/type.go index 5cbf93057306e..8164953602e94 100644 --- a/vendor/github.com/go-xorm/core/type.go +++ b/vendor/xorm.io/core/type.go @@ -1,3 +1,7 @@ +// Copyright 2019 The Xorm Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
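The `Tx` wrapper completes the picture at the transaction level: its `ExecMap*`, `QueryMap*`, and `*Struct*` methods expand named markers through `MapToSlice` and `StructToSlice` before delegating to the embedded `*sql.Tx`. A sketch of a transactional update using the map flavour, under the same `?name`-marker assumption as above and assuming `MapToSlice`, like `Stmt.ExecMapContext`, expects a pointer to a map; the `repository` table is illustrative:

```go
package example

import (
	"context"

	"xorm.io/core"
)

// renameRepo runs a single named-parameter update inside a transaction.
func renameRepo(ctx context.Context, db *core.DB, id int64, name string) error {
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return err
	}
	// ExecMapContext expands the markers via MapToSlice, then calls the
	// underlying *sql.Tx.ExecContext with positional arguments.
	if _, err := tx.ExecMapContext(ctx,
		"UPDATE repository SET name = ?name WHERE id = ?id",
		&map[string]interface{}{"name": name, "id": id},
	); err != nil {
		tx.Rollback()
		return err
	}
	return tx.Commit()
}
```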
+ package core import ( @@ -71,6 +75,7 @@ var ( Char = "CHAR" Varchar = "VARCHAR" + NChar = "NCHAR" NVarchar = "NVARCHAR" TinyText = "TINYTEXT" Text = "TEXT" @@ -84,12 +89,15 @@ var ( Date = "DATE" DateTime = "DATETIME" + SmallDateTime = "SMALLDATETIME" Time = "TIME" TimeStamp = "TIMESTAMP" TimeStampz = "TIMESTAMPZ" Decimal = "DECIMAL" Numeric = "NUMERIC" + Money = "MONEY" + SmallMoney = "SMALLMONEY" Real = "REAL" Float = "FLOAT" @@ -127,6 +135,7 @@ var ( Jsonb: TEXT_TYPE, Char: TEXT_TYPE, + NChar: TEXT_TYPE, Varchar: TEXT_TYPE, NVarchar: TEXT_TYPE, TinyText: TEXT_TYPE, @@ -143,12 +152,15 @@ var ( Time: TIME_TYPE, TimeStamp: TIME_TYPE, TimeStampz: TIME_TYPE, + SmallDateTime: TIME_TYPE, Decimal: NUMERIC_TYPE, Numeric: NUMERIC_TYPE, Real: NUMERIC_TYPE, Float: NUMERIC_TYPE, Double: NUMERIC_TYPE, + Money: NUMERIC_TYPE, + SmallMoney: NUMERIC_TYPE, Binary: BLOB_TYPE, VarBinary: BLOB_TYPE, @@ -295,15 +307,15 @@ func SQLType2Type(st SQLType) reflect.Type { return reflect.TypeOf(float32(1)) case Double: return reflect.TypeOf(float64(1)) - case Char, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName: + case Char, NChar, Varchar, NVarchar, TinyText, Text, NText, MediumText, LongText, Enum, Set, Uuid, Clob, SysName: return reflect.TypeOf("") case TinyBlob, Blob, LongBlob, Bytea, Binary, MediumBlob, VarBinary, UniqueIdentifier: return reflect.TypeOf([]byte{}) case Bool: return reflect.TypeOf(true) - case DateTime, Date, Time, TimeStamp, TimeStampz: + case DateTime, Date, Time, TimeStamp, TimeStampz, SmallDateTime: return reflect.TypeOf(c_TIME_DEFAULT) - case Decimal, Numeric: + case Decimal, Numeric, Money, SmallMoney: return reflect.TypeOf("") default: return reflect.TypeOf("") From 5492ee899f9b926a87c32603e3995194f2db3e66 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Thu, 20 Jun 2019 23:29:20 +0800 Subject: [PATCH 2/5] upgrade packages --- go.mod | 5 - go.sum | 9 +- vendor/cloud.google.com/go/AUTHORS | 15 + vendor/cloud.google.com/go/CONTRIBUTORS | 40 + vendor/cloud.google.com/go/LICENSE | 202 ++++ vendor/cloud.google.com/go/civil/civil.go | 277 ++++++ .../denisenkom/go-mssqldb/README.md | 247 ++++- .../denisenkom/go-mssqldb/appveyor.yml | 48 + .../github.com/denisenkom/go-mssqldb/buf.go | 154 ++-- .../denisenkom/go-mssqldb/bulkcopy.go | 554 +++++++++++ .../denisenkom/go-mssqldb/bulkcopy_sql.go | 93 ++ .../denisenkom/go-mssqldb/collation.go | 39 - .../denisenkom/go-mssqldb/convert.go | 306 +++++++ .../denisenkom/go-mssqldb/decimal.go | 22 +- .../github.com/denisenkom/go-mssqldb/doc.go | 14 + .../go-mssqldb/{ => internal/cp}/charset.go | 8 +- .../go-mssqldb/internal/cp/collation.go | 20 + .../go-mssqldb/{ => internal/cp}/cp1250.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1251.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1252.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1253.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1254.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1255.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1256.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1257.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp1258.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp437.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp850.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp874.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp932.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp936.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp949.go | 2 +- .../go-mssqldb/{ => internal/cp}/cp950.go | 2 +- .../github.com/denisenkom/go-mssqldb/log.go | 21 +- 
.../github.com/denisenkom/go-mssqldb/mssql.go | 863 ++++++++++++++---- .../denisenkom/go-mssqldb/mssql_go1.3.go | 11 - .../denisenkom/go-mssqldb/mssql_go1.3pre.go | 11 - .../denisenkom/go-mssqldb/mssql_go110.go | 47 + .../denisenkom/go-mssqldb/mssql_go19.go | 171 ++++ .../denisenkom/go-mssqldb/mssql_go19pre.go | 16 + .../github.com/denisenkom/go-mssqldb/net.go | 22 +- .../github.com/denisenkom/go-mssqldb/ntlm.go | 84 +- .../denisenkom/go-mssqldb/parser.go | 40 +- .../github.com/denisenkom/go-mssqldb/rpc.go | 47 +- .../denisenkom/go-mssqldb/sspi_windows.go | 2 +- .../github.com/denisenkom/go-mssqldb/tds.go | 407 +++++++-- .../github.com/denisenkom/go-mssqldb/token.go | 371 ++++++-- .../denisenkom/go-mssqldb/token_string.go | 53 ++ .../github.com/denisenkom/go-mssqldb/tran.go | 27 +- .../github.com/denisenkom/go-mssqldb/types.go | 835 +++++++++++++++-- .../denisenkom/go-mssqldb/uniqueidentifier.go | 74 ++ .../go-sql-driver/mysql/.travis.yml | 2 +- vendor/github.com/go-sql-driver/mysql/AUTHORS | 5 - .../go-sql-driver/mysql/CHANGELOG.md | 11 + .../github.com/go-sql-driver/mysql/README.md | 6 +- vendor/github.com/go-sql-driver/mysql/auth.go | 8 +- .../github.com/go-sql-driver/mysql/buffer.go | 49 +- .../go-sql-driver/mysql/connection.go | 210 +---- .../go-sql-driver/mysql/connection_go18.go | 207 +++++ .../github.com/go-sql-driver/mysql/driver.go | 15 +- vendor/github.com/go-sql-driver/mysql/dsn.go | 2 +- .../github.com/go-sql-driver/mysql/packets.go | 60 +- .../github.com/go-sql-driver/mysql/utils.go | 31 +- .../go-sql-driver/mysql/utils_go17.go | 40 + .../go-sql-driver/mysql/utils_go18.go | 50 + vendor/modules.txt | 7 +- 66 files changed, 4925 insertions(+), 965 deletions(-) create mode 100644 vendor/cloud.google.com/go/AUTHORS create mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS create mode 100644 vendor/cloud.google.com/go/LICENSE create mode 100644 vendor/cloud.google.com/go/civil/civil.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/appveyor.yml create mode 100644 vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/collation.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/convert.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/doc.go rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/charset.go (94%) create mode 100644 vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1250.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1251.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1252.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1253.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1254.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1255.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1256.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1257.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp1258.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp437.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ 
=> internal/cp}/cp850.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp874.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp932.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp936.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp949.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{ => internal/cp}/cp950.go (99%) delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/token_string.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go create mode 100644 vendor/github.com/go-sql-driver/mysql/connection_go18.go create mode 100644 vendor/github.com/go-sql-driver/mysql/utils_go17.go create mode 100644 vendor/github.com/go-sql-driver/mysql/utils_go18.go diff --git a/go.mod b/go.mod index 9d957cea45656..933b48077df4f 100644 --- a/go.mod +++ b/go.mod @@ -139,8 +139,3 @@ require ( xorm.io/builder v0.3.5 xorm.io/core v0.6.3 ) - -replace ( - github.com/denisenkom/go-mssqldb => github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 - github.com/go-sql-driver/mysql => github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f -) diff --git a/go.sum b/go.sum index 7e0fc6cb27785..14fac85b4c4d3 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,5 @@ cloud.google.com/go v0.30.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -59,8 +60,8 @@ github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 h1:JpA+YMG4JLW8nzLmU05mTiuB0O17xHGxpWolEZ0zDuA= -github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= +github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 h1:b5OnbZD49x9g+/FcYbs/vukEt8C/jUbGhCJ3uduQmu8= +github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac h1:xrQJVwQCGqDvOO7/0+RyIq5J2M3Q4ZF7Ug/BMQtML1E= github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8= @@ -118,8 +119,8 @@ github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90 h1:3wYKrRg9IjUM 
github.com/go-macaron/toolbox v0.0.0-20180818072302-a77f45a7ce90/go.mod h1:Ut/NmkIMGVYlEdJBzEZgWVWG5ZpYS9BLmUgXfAgi+qM= github.com/go-redis/redis v6.15.2+incompatible h1:9SpNVG76gr6InJGxoZ6IuuxaCOQwDAhzyXg+Bs+0Sb4= github.com/go-redis/redis v6.15.2+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f h1:fbIzwEaXt5b2bl9mm+PIufKTSGKk6ZuwSSTQ7iZj7Lo= -github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= github.com/go-xorm/core v0.6.0 h1:tp6hX+ku4OD9khFZS8VGBDRY3kfVCtelPfmkgCyHxL0= github.com/go-xorm/core v0.6.0/go.mod h1:d8FJ9Br8OGyQl12MCclmYBuBqqxsyeedpXciV5Myih8= github.com/go-xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:9wScpmSP5A3Bk8V3XHWUcJmYTh+ZnlHVyc+A4oZYS3Y= diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS new file mode 100644 index 0000000000000..c364af1da0953 --- /dev/null +++ b/vendor/cloud.google.com/go/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of cloud authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. + +Filippo Valsorda +Google Inc. +Ingo Oeser +Palm Stone Games, Inc. +Paweł Knap +Péter Szilágyi +Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS new file mode 100644 index 0000000000000..3b3cbed98e9a9 --- /dev/null +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -0,0 +1,40 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name + +# Keep the list alphabetically sorted. + +Alexis Hunt +Andreas Litt +Andrew Gerrand +Brad Fitzpatrick +Burcu Dogan +Dave Day +David Sansome +David Symonds +Filippo Valsorda +Glenn Lewis +Ingo Oeser +James Hall +Johan Euphrosine +Jonathan Amsterdam +Kunpei Sakai +Luna Duclos +Magnus Hiie +Mario Castro +Michael McGreevy +Omar Jarjur +Paweł Knap +Péter Szilágyi +Sarah Adams +Thanatat Tamtan +Toby Burress +Tuo Shan +Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/vendor/cloud.google.com/go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/civil/civil.go b/vendor/cloud.google.com/go/civil/civil.go new file mode 100644 index 0000000000000..29272ef26a313 --- /dev/null +++ b/vendor/cloud.google.com/go/civil/civil.go @@ -0,0 +1,277 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package civil implements types for civil time, a time-zone-independent +// representation of time that follows the rules of the proleptic +// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second +// minutes. +// +// Because they lack location information, these types do not represent unique +// moments or intervals of time. Use time.Time for that purpose. +package civil + +import ( + "fmt" + "time" +) + +// A Date represents a date (year, month, day). +// +// This type does not include location information, and therefore does not +// describe a unique 24-hour timespan. +type Date struct { + Year int // Year (e.g., 2014). + Month time.Month // Month of the year (January = 1, ...). + Day int // Day of the month, starting at 1. +} + +// DateOf returns the Date in which a time occurs in that time's location. +func DateOf(t time.Time) Date { + var d Date + d.Year, d.Month, d.Day = t.Date() + return d +} + +// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. +func ParseDate(s string) (Date, error) { + t, err := time.Parse("2006-01-02", s) + if err != nil { + return Date{}, err + } + return DateOf(t), nil +} + +// String returns the date in RFC3339 full-date format. +func (d Date) String() string { + return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) +} + +// IsValid reports whether the date is valid. +func (d Date) IsValid() bool { + return DateOf(d.In(time.UTC)) == d +} + +// In returns the time corresponding to time 00:00:00 of the date in the location. +// +// In is always consistent with time.Date, even when time.Date returns a time +// on a different day. For example, if loc is America/Indiana/Vincennes, then both +// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) +// and +// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) +// return 23:00:00 on April 30, 1955. +// +// In panics if loc is nil. +func (d Date) In(loc *time.Location) time.Time { + return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) +} + +// AddDays returns the date that is n days in the future. +// n can also be negative to go into the past. +func (d Date) AddDays(n int) Date { + return DateOf(d.In(time.UTC).AddDate(0, 0, n)) +} + +// DaysSince returns the signed number of days between the date and s, not including the end day. +// This is the inverse operation to AddDays. +func (d Date) DaysSince(s Date) (days int) { + // We convert to Unix time so we do not have to worry about leap seconds: + // Unix time increases by exactly 86400 seconds per day. + deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() + return int(deltaUnix / 86400) +} + +// Before reports whether d1 occurs before d2. 
+func (d1 Date) Before(d2 Date) bool { + if d1.Year != d2.Year { + return d1.Year < d2.Year + } + if d1.Month != d2.Month { + return d1.Month < d2.Month + } + return d1.Day < d2.Day +} + +// After reports whether d1 occurs after d2. +func (d1 Date) After(d2 Date) bool { + return d2.Before(d1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of d.String(). +func (d Date) MarshalText() ([]byte, error) { + return []byte(d.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The date is expected to be a string in a format accepted by ParseDate. +func (d *Date) UnmarshalText(data []byte) error { + var err error + *d, err = ParseDate(string(data)) + return err +} + +// A Time represents a time with nanosecond precision. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. +// +// This type exists to represent the TIME type in storage-based APIs like BigQuery. +// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. +type Time struct { + Hour int // The hour of the day in 24-hour format; range [0-23] + Minute int // The minute of the hour; range [0-59] + Second int // The second of the minute; range [0-59] + Nanosecond int // The nanosecond of the second; range [0-999999999] +} + +// TimeOf returns the Time representing the time of day in which a time occurs +// in that time's location. It ignores the date. +func TimeOf(t time.Time) Time { + var tm Time + tm.Hour, tm.Minute, tm.Second = t.Clock() + tm.Nanosecond = t.Nanosecond() + return tm +} + +// ParseTime parses a string and returns the time value it represents. +// ParseTime accepts an extended form of the RFC3339 partial-time format. After +// the HH:MM:SS part of the string, an optional fractional part may appear, +// consisting of a decimal point followed by one to nine decimal digits. +// (RFC3339 admits only one digit after the decimal point). +func ParseTime(s string) (Time, error) { + t, err := time.Parse("15:04:05.999999999", s) + if err != nil { + return Time{}, err + } + return TimeOf(t), nil +} + +// String returns the date in the format described in ParseTime. If Nanoseconds +// is zero, no fractional part will be generated. Otherwise, the result will +// end with a fractional part consisting of a decimal point and nine digits. +func (t Time) String() string { + s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) + if t.Nanosecond == 0 { + return s + } + return s + fmt.Sprintf(".%09d", t.Nanosecond) +} + +// IsValid reports whether the time is valid. +func (t Time) IsValid() bool { + // Construct a non-zero time. + tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) + return TimeOf(tm) == t +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of t.String(). +func (t Time) MarshalText() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// The time is expected to be a string in a format accepted by ParseTime. +func (t *Time) UnmarshalText(data []byte) error { + var err error + *t, err = ParseTime(string(data)) + return err +} + +// A DateTime represents a date and time. +// +// This type does not include location information, and therefore does not +// describe a unique moment in time. 
+type DateTime struct { + Date Date + Time Time +} + +// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. + +// DateTimeOf returns the DateTime in which a time occurs in that time's location. +func DateTimeOf(t time.Time) DateTime { + return DateTime{ + Date: DateOf(t), + Time: TimeOf(t), + } +} + +// ParseDateTime parses a string and returns the DateTime it represents. +// ParseDateTime accepts a variant of the RFC3339 date-time format that omits +// the time offset but includes an optional fractional time, as described in +// ParseTime. Informally, the accepted format is +// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] +// where the 'T' may be a lower-case 't'. +func ParseDateTime(s string) (DateTime, error) { + t, err := time.Parse("2006-01-02T15:04:05.999999999", s) + if err != nil { + t, err = time.Parse("2006-01-02t15:04:05.999999999", s) + if err != nil { + return DateTime{}, err + } + } + return DateTimeOf(t), nil +} + +// String returns the date in the format described in ParseDate. +func (dt DateTime) String() string { + return dt.Date.String() + "T" + dt.Time.String() +} + +// IsValid reports whether the datetime is valid. +func (dt DateTime) IsValid() bool { + return dt.Date.IsValid() && dt.Time.IsValid() +} + +// In returns the time corresponding to the DateTime in the given location. +// +// If the time is missing or ambigous at the location, In returns the same +// result as time.Date. For example, if loc is America/Indiana/Vincennes, then +// both +// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) +// and +// civil.DateTime{ +// civil.Date{Year: 1955, Month: time.May, Day: 1}}, +// civil.Time{Minute: 30}}.In(loc) +// return 23:30:00 on April 30, 1955. +// +// In panics if loc is nil. +func (dt DateTime) In(loc *time.Location) time.Time { + return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) +} + +// Before reports whether dt1 occurs before dt2. +func (dt1 DateTime) Before(dt2 DateTime) bool { + return dt1.In(time.UTC).Before(dt2.In(time.UTC)) +} + +// After reports whether dt1 occurs after dt2. +func (dt1 DateTime) After(dt2 DateTime) bool { + return dt2.Before(dt1) +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The output is the result of dt.String(). +func (dt DateTime) MarshalText() ([]byte, error) { + return []byte(dt.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. 
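`cloud.google.com/go/civil` is vendored here because the upgraded go-mssqldb driver (later in this patch) maps `civil.Date`, `civil.Time`, and `civil.DateTime` parameters to the SQL Server `date`, `time`, and `datetime2` types. A small round-trip of the types themselves, using only the API shown above:

```go
package main

import (
	"fmt"
	"time"

	"cloud.google.com/go/civil"
)

func main() {
	// A civil.Date has no location, so "2019-06-20" means the same thing
	// regardless of time zone.
	d, err := civil.ParseDate("2019-06-20")
	if err != nil {
		panic(err)
	}
	fmt.Println(d.AddDays(10))                                                    // 2019-06-30
	fmt.Println(d.DaysSince(civil.Date{Year: 2019, Month: time.January, Day: 1})) // 170

	// DateTimeOf splits a time.Time into its civil date and time-of-day parts.
	dt := civil.DateTimeOf(time.Date(2019, time.June, 20, 23, 21, 21, 0, time.UTC))
	fmt.Println(dt) // 2019-06-20T23:21:21
}
```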
+// The datetime is expected to be a string in a format accepted by ParseDateTime +func (dt *DateTime) UnmarshalText(data []byte) error { + var err error + *dt, err = ParseDateTime(string(data)) + return err +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/README.md b/vendor/github.com/denisenkom/go-mssqldb/README.md index 8570ae9f615d5..e1a059d8855fe 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/README.md +++ b/vendor/github.com/denisenkom/go-mssqldb/README.md @@ -1,78 +1,209 @@ # A pure Go MSSQL driver for Go's database/sql package +[![GoDoc](https://godoc.org/github.com/denisenkom/go-mssqldb?status.svg)](http://godoc.org/github.com/denisenkom/go-mssqldb) +[![Build status](https://ci.appveyor.com/api/projects/status/jrln8cs62wj9i0a2?svg=true)](https://ci.appveyor.com/project/denisenkom/go-mssqldb) +[![codecov](https://codecov.io/gh/denisenkom/go-mssqldb/branch/master/graph/badge.svg)](https://codecov.io/gh/denisenkom/go-mssqldb) + ## Install - go get github.com/denisenkom/go-mssqldb +Requires Go 1.8 or above. -## Tests +Install with `go get github.com/denisenkom/go-mssqldb` . -`go test` is used for testing. A running instance of MSSQL server is required. -Environment variables are used to pass login information. +## Connection Parameters and DSN -Example: +The recommended connection string uses a URL format: +`sqlserver://username:password@host/instance?param1=value¶m2=value` +Other supported formats are listed below. + +### Common parameters: + +* `user id` - enter the SQL Server Authentication user id or the Windows Authentication user id in the DOMAIN\User format. On Windows, if user id is empty or missing Single-Sign-On is used. +* `password` +* `database` +* `connection timeout` - in seconds (default is 0 for no timeout), set to 0 for no timeout. Recommended to set to 0 and use context to manage query and connection timeouts. +* `dial timeout` - in seconds (default is 15), set to 0 for no timeout +* `encrypt` + * `disable` - Data send between client and server is not encrypted. + * `false` - Data sent between client and server is not encrypted beyond the login packet. (Default) + * `true` - Data sent between client and server is encrypted. +* `app name` - The application name (default is go-mssqldb) - env HOST=localhost SQLUSER=sa SQLPASSWORD=sa DATABASE=test go test - -## Connection Parameters - -* "server" - host or host\instance (default localhost) -* "port" - used only when there is no instance in server (default 1433) -* "failoverpartner" - host or host\instance (default is no partner). -* "failoverport" - used only when there is no instance in failoverpartner (default 1433) -* "user id" - enter the SQL Server Authentication user id or the Windows Authentication user id in the DOMAIN\User format. On Windows, if user id is empty or missing Single-Sign-On is used. -* "password" -* "database" -* "connection timeout" - in seconds (default is 30) -* "dial timeout" - in seconds (default is 5) -* "keepAlive" - in seconds; 0 to disable (default is 0) -* "log" - logging flags (default 0/no logging, 63 for full logging) +### Connection parameters for ODBC and ADO style connection strings: + +* `server` - host or host\instance (default localhost) +* `port` - used only when there is no instance in server (default 1433) + +### Less common parameters: + +* `keepAlive` - in seconds; 0 to disable (default is 30) +* `failoverpartner` - host or host\instance (default is no partner). 
+* `failoverport` - used only when there is no instance in failoverpartner (default 1433) +* `packet size` - in bytes; 512 to 32767 (default is 4096) + * Encrypted connections have a maximum packet size of 16383 bytes + * Further information on usage: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-network-packet-size-server-configuration-option +* `log` - logging flags (default 0/no logging, 63 for full logging) * 1 log errors * 2 log messages * 4 log rows affected * 8 trace sql statements * 16 log statement parameters * 32 log transaction begin/end -* "encrypt" - * disable - Data send between client and server is not encrypted. - * false - Data sent between client and server is not encrypted beyond the login packet. (Default) - * true - Data sent between client and server is encrypted. -* "TrustServerCertificate" +* `TrustServerCertificate` * false - Server certificate is checked. Default is false if encypt is specified. * true - Server certificate is not checked. Default is true if encrypt is not specified. If trust server certificate is true, driver accepts any certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing. -* "certificate" - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates. -* "hostNameInCertificate" - Specifies the Common Name (CN) in the server certificate. Default value is the server host. -* "ServerSPN" - The kerberos SPN (Service Principal Name) for the server. Default is MSSQLSvc/host:port. -* "Workstation ID" - The workstation name (default is the host name) -* "app name" - The application name (default is go-mssqldb) -* "ApplicationIntent" - Can be given the value "ReadOnly" to initiate a read-only connection to an Availability Group listener. +* `certificate` - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates. +* `hostNameInCertificate` - Specifies the Common Name (CN) in the server certificate. Default value is the server host. +* `ServerSPN` - The kerberos SPN (Service Principal Name) for the server. Default is MSSQLSvc/host:port. +* `Workstation ID` - The workstation name (default is the host name) +* `ApplicationIntent` - Can be given the value `ReadOnly` to initiate a read-only connection to an Availability Group listener. -Example: +### The connection string can be specified in one of three formats: + + +1. URL: with `sqlserver` scheme. username and password appears before the host. Any instance appears as + the first segment in the path. All other options are query parameters. Examples: + + * `sqlserver://username:password@host/instance?param1=value¶m2=value` + * `sqlserver://username:password@host:port?param1=value¶m2=value` + * `sqlserver://sa@localhost/SQLExpress?database=master&connection+timeout=30` // `SQLExpress instance. + * `sqlserver://sa:mypass@localhost?database=master&connection+timeout=30` // username=sa, password=mypass. + * `sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30` // port 1234 on localhost. + * `sqlserver://sa:my%7Bpass@somehost?connection+timeout=30` // password is "my{pass" + + A string of this format can be constructed using the `URL` type in the `net/url` package. 
```go - db, err := sql.Open("mssql", "server=localhost;user id=sa") + query := url.Values{} + query.Add("app name", "MyAppName") + + u := &url.URL{ + Scheme: "sqlserver", + User: url.UserPassword(username, password), + Host: fmt.Sprintf("%s:%d", hostname, port), + // Path: instance, // if connecting to an instance instead of a port + RawQuery: query.Encode(), + } + db, err := sql.Open("sqlserver", u.String()) ``` -## Statement Parameters +2. ADO: `key=value` pairs separated by `;`. Values may not contain `;`, leading and trailing whitespace is ignored. + Examples: + + * `server=localhost\\SQLExpress;user id=sa;database=master;app name=MyAppName` + * `server=localhost;user id=sa;database=master;app name=MyAppName` + +3. ODBC: Prefix with `odbc`, `key=value` pairs separated by `;`. Allow `;` by wrapping + values in `{}`. Examples: + + * `odbc:server=localhost\\SQLExpress;user id=sa;database=master;app name=MyAppName` + * `odbc:server=localhost;user id=sa;database=master;app name=MyAppName` + * `odbc:server=localhost;user id=sa;password={foo;bar}` // Value marked with `{}`, password is "foo;bar" + * `odbc:server=localhost;user id=sa;password={foo{bar}` // Value marked with `{}`, password is "foo{bar" + * `odbc:server=localhost;user id=sa;password={foobar }` // Value marked with `{}`, password is "foobar " + * `odbc:server=localhost;user id=sa;password=foo{bar` // Literal `{`, password is "foo{bar" + * `odbc:server=localhost;user id=sa;password=foo}bar` // Literal `}`, password is "foo}bar" + * `odbc:server=localhost;user id=sa;password={foo{bar}` // Literal `{`, password is "foo{bar" + * `odbc:server=localhost;user id=sa;password={foo}}bar}` // Escaped `} with `}}`, password is "foo}bar" + +## Executing Stored Procedures + +To run a stored procedure, set the query text to the procedure name: +```go +var account = "abc" +_, err := db.ExecContext(ctx, "sp_RunMe", + sql.Named("ID", 123), + sql.Named("Account", sql.Out{Dest: &account}), +) +``` -In the SQL statement text, literals may be replaced by a parameter that matches one of the following: +## Caveat for local temporary tables -* ? -* ?nnn -* :nnn -* $nnn +Due to protocol limitations, temporary tables will only be allocated on the connection +as a result of executing a query with zero parameters. The following query +will, due to the use of a parameter, execute in its own session, +and `#mytemp` will be de-allocated right away: -where nnn represents an integer that specifies a 1-indexed positional parameter. Ex: +```go +conn, err := pool.Conn(ctx) +defer conn.Close() +_, err := conn.ExecContext(ctx, "select @p1 as x into #mytemp", 1) +// at this point #mytemp is already dropped again as the session of the ExecContext is over +``` + +To work around this, always explicitly create the local temporary +table in a query without any parameters. As a special case, the driver +will then be able to execute the query directly on the +connection-scoped session. The following example works: ```go -db.Query("SELECT * FROM t WHERE a = ?3, b = ?2, c = ?1", "x", "y", "z") +conn, err := pool.Conn(ctx) + +// Set us up so that temp table is always cleaned up, since conn.Close() +// merely returns conn to pool, rather than actually closing the connection. +defer func() { + _, _ = conn.ExecContext(ctx, "drop table #mytemp") // always clean up + conn.Close() // merely returns conn to pool +}() + + +// Since we not pass any parameters below, the query will execute on the scope of +// the connection and succeed in creating the table. 
+_, err := conn.ExecContext(ctx, "create table #mytemp ( x int )") + +// #mytemp is now available even if you pass parameters +_, err := conn.ExecContext(ctx, "insert into #mytemp (x) values (@p1)", 1) + ``` -will expand to roughly +## Return Status + +To get the procedure return status, pass into the parameters a +`*mssql.ReturnStatus`. For example: +``` +var rs mssql.ReturnStatus +_, err := db.ExecContext(ctx, "theproc", &rs) +log.Printf("status=%d", rs) +``` + +## Parameters + +The `sqlserver` driver uses normal MS SQL Server syntax and expects parameters in +the sql query to be in the form of either `@Name` or `@p1` to `@pN` (ordinal position). -```sql -SELECT * FROM t WHERE a = 'z', b = 'y', c = 'x' +```go +db.QueryContext(ctx, `select * from t where ID = @ID and Name = @p2;`, sql.Named("ID", 6), "Bob") ``` +### Parameter Types + +To pass specific types to the query parameters, say `varchar` or `date` types, +you must convert the types to the type before passing in. The following types +are supported: + + * string -> nvarchar + * mssql.VarChar -> varchar + * time.Time -> datetimeoffset or datetime (TDS version dependent) + * mssql.DateTime1 -> datetime + * mssql.DateTimeOffset -> datetimeoffset + * "cloud.google.com/go/civil".Date -> date + * "cloud.google.com/go/civil".DateTime -> datetime2 + * "cloud.google.com/go/civil".Time -> time + +## Important Notes + + * [LastInsertId](https://golang.org/pkg/database/sql/#Result.LastInsertId) should + not be used with this driver (or SQL Server) due to how the TDS protocol + works. Please use the [OUTPUT Clause](https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql) + or add a `select ID = convert(bigint, SCOPE_IDENTITY());` to the end of your + query (ref [SCOPE_IDENTITY](https://docs.microsoft.com/en-us/sql/t-sql/functions/scope-identity-transact-sql)). + This will ensure you are getting the correct ID and will prevent a network round trip. + * [NewConnector](https://godoc.org/github.com/denisenkom/go-mssqldb#NewConnector) + may be used with [OpenDB](https://golang.org/pkg/database/sql/#OpenDB). + * [Connector.SessionInitSQL](https://godoc.org/github.com/denisenkom/go-mssqldb#Connector.SessionInitSQL) + may be set to set any driver specific session settings after the session + has been reset. If empty the session will still be reset but use the database + defaults in Go1.10+. ## Features @@ -87,6 +218,34 @@ SELECT * FROM t WHERE a = 'z', b = 'y', c = 'x' * Supports connections to AlwaysOn Availability Group listeners, including re-direction to read-only replicas. * Supports query notifications +## Tests + +`go test` is used for testing. A running instance of MSSQL server is required. +Environment variables are used to pass login information. + +Example: + + env SQLSERVER_DSN=sqlserver://user:pass@hostname/instance?database=test1 go test + +## Deprecated + +These features still exist in the driver, but they are are deprecated. + +### Query Parameter Token Replace (driver "mssql") + +If you use the driver name "mssql" (rather then "sqlserver") the SQL text +will be loosly parsed and an attempt to extract identifiers using one of + +* ? +* ?nnn +* :nnn +* $nnn + +will be used. This is not recommended with SQL Server. +There is at least one existing `won't fix` issue with the query parsing. + +Use the native "@Name" parameters instead with the "sqlserver" driver name. + ## Known Issues * SQL Server 2008 and 2008 R2 engine cannot handle login records when SSL encryption is not disabled. 
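The "Important Notes" above advise against `Result.LastInsertId` with this driver and recommend selecting `SCOPE_IDENTITY()` at the end of the statement instead. For illustration, a sketch of that pattern with the `sqlserver` driver name and named parameters; the `accounts` table and its columns are made-up names:

```go
package example

import (
	"context"
	"database/sql"

	_ "github.com/denisenkom/go-mssqldb" // registers the "sqlserver" driver
)

// insertAccount returns the identity generated by the insert, following the
// SCOPE_IDENTITY pattern recommended in the notes above.
func insertAccount(ctx context.Context, db *sql.DB, name string) (int64, error) {
	var id int64
	err := db.QueryRowContext(ctx,
		`insert into accounts (name) values (@Name);
		 select ID = convert(bigint, SCOPE_IDENTITY());`,
		sql.Named("Name", name),
	).Scan(&id)
	return id, err
}
```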
diff --git a/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml b/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml new file mode 100644 index 0000000000000..2ae5456d5cb70 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml @@ -0,0 +1,48 @@ +version: 1.0.{build} + +os: Windows Server 2012 R2 + +clone_folder: c:\gopath\src\github.com\denisenkom\go-mssqldb + +environment: + GOPATH: c:\gopath + HOST: localhost + SQLUSER: sa + SQLPASSWORD: Password12! + DATABASE: test + GOVERSION: 110 + matrix: + - GOVERSION: 18 + SQLINSTANCE: SQL2016 + - GOVERSION: 19 + SQLINSTANCE: SQL2016 + - GOVERSION: 110 + SQLINSTANCE: SQL2016 + - SQLINSTANCE: SQL2014 + - SQLINSTANCE: SQL2012SP1 + - SQLINSTANCE: SQL2008R2SP2 + +install: + - set GOROOT=c:\go%GOVERSION% + - set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH% + - go version + - go env + - go get -u cloud.google.com/go/civil + +build_script: + - go build + +before_test: + # setup SQL Server + - ps: | + $instanceName = $env:SQLINSTANCE + Start-Service "MSSQL`$$instanceName" + Start-Service "SQLBrowser" + - sqlcmd -S "(local)\%SQLINSTANCE%" -Q "Use [master]; CREATE DATABASE test;" + - sqlcmd -S "(local)\%SQLINSTANCE%" -h -1 -Q "set nocount on; Select @@version" + - pip install codecov + + +test_script: + - go test -race -cpu 4 -coverprofile=coverage.txt -covermode=atomic + - codecov -f coverage.txt diff --git a/vendor/github.com/denisenkom/go-mssqldb/buf.go b/vendor/github.com/denisenkom/go-mssqldb/buf.go index 42e8ae345cf48..927d75d1b78b4 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/buf.go +++ b/vendor/github.com/denisenkom/go-mssqldb/buf.go @@ -2,12 +2,14 @@ package mssql import ( "encoding/binary" - "io" "errors" + "io" ) +type packetType uint8 + type header struct { - PacketType uint8 + PacketType packetType Status uint8 Size uint16 Spid uint16 @@ -15,125 +17,159 @@ type header struct { Pad uint8 } +// tdsBuffer reads and writes TDS packets of data to the transport. +// The write and read buffers are separate to make sending attn signals +// possible without locks. Currently attn signals are only sent during +// reads, not writes. type tdsBuffer struct { - buf []byte - pos uint16 - transport io.ReadWriteCloser - size uint16 + transport io.ReadWriteCloser + + packetSize int + + // Write fields. + wbuf []byte + wpos int + wPacketSeq byte + wPacketType packetType + + // Read fields. + rbuf []byte + rpos int + rsize int final bool - packet_type uint8 - afterFirst func() + rPacketType packetType + + // afterFirst is assigned to right after tdsBuffer is created and + // before the first use. It is executed after the first packet is + // written and then removed. + afterFirst func() } -func newTdsBuffer(bufsize int, transport io.ReadWriteCloser) *tdsBuffer { - buf := make([]byte, bufsize) - w := new(tdsBuffer) - w.buf = buf - w.pos = 8 - w.transport = transport - w.size = 0 - return w +func newTdsBuffer(bufsize uint16, transport io.ReadWriteCloser) *tdsBuffer { + return &tdsBuffer{ + packetSize: int(bufsize), + wbuf: make([]byte, 1<<16), + rbuf: make([]byte, 1<<16), + rpos: 8, + transport: transport, + } +} + +func (rw *tdsBuffer) ResizeBuffer(packetSize int) { + rw.packetSize = packetSize +} + +func (w *tdsBuffer) PackageSize() int { + return w.packetSize } func (w *tdsBuffer) flush() (err error) { - // writing packet size - binary.BigEndian.PutUint16(w.buf[2:], w.pos) + // Write packet size. 
+ w.wbuf[0] = byte(w.wPacketType) + binary.BigEndian.PutUint16(w.wbuf[2:], uint16(w.wpos)) + w.wbuf[6] = w.wPacketSeq - // writing packet into underlying transport - if _, err = w.transport.Write(w.buf[:w.pos]); err != nil { + // Write packet into underlying transport. + if _, err = w.transport.Write(w.wbuf[:w.wpos]); err != nil { return err } + // It is possible to create a whole new buffer after a flush. + // Useful for debugging. Normally reuse the buffer. + // w.wbuf = make([]byte, 1<<16) - // execute afterFirst hook if it is set + // Execute afterFirst hook if it is set. if w.afterFirst != nil { w.afterFirst() w.afterFirst = nil } - w.pos = 8 - // packet number - w.buf[6] += 1 + w.wpos = 8 + w.wPacketSeq++ return nil } func (w *tdsBuffer) Write(p []byte) (total int, err error) { - total = 0 for { - copied := copy(w.buf[w.pos:], p) - w.pos += uint16(copied) + copied := copy(w.wbuf[w.wpos:w.packetSize], p) + w.wpos += copied total += copied if copied == len(p) { - break + return } if err = w.flush(); err != nil { return } p = p[copied:] } - return } func (w *tdsBuffer) WriteByte(b byte) error { - if int(w.pos) == len(w.buf) { + if int(w.wpos) == len(w.wbuf) || w.wpos == w.packetSize { if err := w.flush(); err != nil { return err } } - w.buf[w.pos] = b - w.pos += 1 + w.wbuf[w.wpos] = b + w.wpos += 1 return nil } -func (w *tdsBuffer) BeginPacket(packet_type byte) { - w.buf[0] = packet_type - w.buf[1] = 0 // packet is incomplete - w.buf[4] = 0 // spid - w.buf[5] = 0 - w.buf[6] = 1 // packet id - w.buf[7] = 0 // window - w.pos = 8 +func (w *tdsBuffer) BeginPacket(packetType packetType, resetSession bool) { + status := byte(0) + if resetSession { + switch packetType { + // Reset session can only be set on the following packet types. + case packSQLBatch, packRPCRequest, packTransMgrReq: + status = 0x8 + } + } + w.wbuf[1] = status // Packet is incomplete. This byte is set again in FinishPacket. + w.wpos = 8 + w.wPacketSeq = 1 + w.wPacketType = packetType } func (w *tdsBuffer) FinishPacket() error { - w.buf[1] = 1 // this is last packet + w.wbuf[1] |= 1 // Mark this as the last packet in the message. 
return w.flush() } +var headerSize = binary.Size(header{}) + func (r *tdsBuffer) readNextPacket() error { - header := header{} + h := header{} var err error - err = binary.Read(r.transport, binary.BigEndian, &header) + err = binary.Read(r.transport, binary.BigEndian, &h) if err != nil { return err } - offset := uint16(binary.Size(header)) - if int(header.Size) > len(r.buf) { + if int(h.Size) > r.packetSize { return errors.New("Invalid packet size, it is longer than buffer size") } - if int(offset) > int(header.Size) { + if headerSize > int(h.Size) { return errors.New("Invalid packet size, it is shorter than header size") } - _, err = io.ReadFull(r.transport, r.buf[offset:header.Size]) + _, err = io.ReadFull(r.transport, r.rbuf[headerSize:h.Size]) if err != nil { return err } - r.pos = offset - r.size = header.Size - r.final = header.Status != 0 - r.packet_type = header.PacketType + r.rpos = headerSize + r.rsize = int(h.Size) + r.final = h.Status != 0 + r.rPacketType = h.PacketType return nil } -func (r *tdsBuffer) BeginRead() (uint8, error) { +func (r *tdsBuffer) BeginRead() (packetType, error) { err := r.readNextPacket() if err != nil { return 0, err } - return r.packet_type, nil + return r.rPacketType, nil } func (r *tdsBuffer) ReadByte() (res byte, err error) { - if r.pos == r.size { + if r.rpos == r.rsize { if r.final { return 0, io.EOF } @@ -142,8 +178,8 @@ func (r *tdsBuffer) ReadByte() (res byte, err error) { return 0, err } } - res = r.buf[r.pos] - r.pos++ + res = r.rbuf[r.rpos] + r.rpos++ return res, nil } @@ -207,7 +243,7 @@ func (r *tdsBuffer) readUcs2(numchars int) string { func (r *tdsBuffer) Read(buf []byte) (copied int, err error) { copied = 0 err = nil - if r.pos == r.size { + if r.rpos == r.rsize { if r.final { return 0, io.EOF } @@ -216,7 +252,7 @@ func (r *tdsBuffer) Read(buf []byte) (copied int, err error) { return } } - copied = copy(buf, r.buf[r.pos:r.size]) - r.pos += uint16(copied) + copied = copy(buf, r.rbuf[r.rpos:r.rsize]) + r.rpos += copied return } diff --git a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go new file mode 100644 index 0000000000000..3b319af893fd3 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go @@ -0,0 +1,554 @@ +package mssql + +import ( + "bytes" + "context" + "encoding/binary" + "fmt" + "math" + "reflect" + "strconv" + "strings" + "time" +) + +type Bulk struct { + // ctx is used only for AddRow and Done methods. + // This could be removed if AddRow and Done accepted + // a ctx field as well, which is available with the + // database/sql call. 
+ ctx context.Context + + cn *Conn + metadata []columnStruct + bulkColumns []columnStruct + columnsName []string + tablename string + numRows int + + headerSent bool + Options BulkOptions + Debug bool +} +type BulkOptions struct { + CheckConstraints bool + FireTriggers bool + KeepNulls bool + KilobytesPerBatch int + RowsPerBatch int + Order []string + Tablock bool +} + +type DataValue interface{} + +func (cn *Conn) CreateBulk(table string, columns []string) (_ *Bulk) { + b := Bulk{ctx: context.Background(), cn: cn, tablename: table, headerSent: false, columnsName: columns} + b.Debug = false + return &b +} + +func (cn *Conn) CreateBulkContext(ctx context.Context, table string, columns []string) (_ *Bulk) { + b := Bulk{ctx: ctx, cn: cn, tablename: table, headerSent: false, columnsName: columns} + b.Debug = false + return &b +} + +func (b *Bulk) sendBulkCommand(ctx context.Context) (err error) { + //get table columns info + err = b.getMetadata(ctx) + if err != nil { + return err + } + + //match the columns + for _, colname := range b.columnsName { + var bulkCol *columnStruct + + for _, m := range b.metadata { + if m.ColName == colname { + bulkCol = &m + break + } + } + if bulkCol != nil { + + if bulkCol.ti.TypeId == typeUdt { + //send udt as binary + bulkCol.ti.TypeId = typeBigVarBin + } + b.bulkColumns = append(b.bulkColumns, *bulkCol) + b.dlogf("Adding column %s %s %#x", colname, bulkCol.ColName, bulkCol.ti.TypeId) + } else { + return fmt.Errorf("Column %s does not exist in destination table %s", colname, b.tablename) + } + } + + //create the bulk command + + //columns definitions + var col_defs bytes.Buffer + for i, col := range b.bulkColumns { + if i != 0 { + col_defs.WriteString(", ") + } + col_defs.WriteString("[" + col.ColName + "] " + makeDecl(col.ti)) + } + + //options + var with_opts []string + + if b.Options.CheckConstraints { + with_opts = append(with_opts, "CHECK_CONSTRAINTS") + } + if b.Options.FireTriggers { + with_opts = append(with_opts, "FIRE_TRIGGERS") + } + if b.Options.KeepNulls { + with_opts = append(with_opts, "KEEP_NULLS") + } + if b.Options.KilobytesPerBatch > 0 { + with_opts = append(with_opts, fmt.Sprintf("KILOBYTES_PER_BATCH = %d", b.Options.KilobytesPerBatch)) + } + if b.Options.RowsPerBatch > 0 { + with_opts = append(with_opts, fmt.Sprintf("ROWS_PER_BATCH = %d", b.Options.RowsPerBatch)) + } + if len(b.Options.Order) > 0 { + with_opts = append(with_opts, fmt.Sprintf("ORDER(%s)", strings.Join(b.Options.Order, ","))) + } + if b.Options.Tablock { + with_opts = append(with_opts, "TABLOCK") + } + var with_part string + if len(with_opts) > 0 { + with_part = fmt.Sprintf("WITH (%s)", strings.Join(with_opts, ",")) + } + + query := fmt.Sprintf("INSERT BULK %s (%s) %s", b.tablename, col_defs.String(), with_part) + + stmt, err := b.cn.PrepareContext(ctx, query) + if err != nil { + return fmt.Errorf("Prepare failed: %s", err.Error()) + } + b.dlogf(query) + + _, err = stmt.(*Stmt).ExecContext(ctx, nil) + if err != nil { + return err + } + + b.headerSent = true + + var buf = b.cn.sess.buf + buf.BeginPacket(packBulkLoadBCP, false) + + // Send the columns metadata. + columnMetadata := b.createColMetadata() + _, err = buf.Write(columnMetadata) + + return +} + +// AddRow immediately writes the row to the destination table. +// The arguments are the row values in the order they were specified. 
+func (b *Bulk) AddRow(row []interface{}) (err error) { + if !b.headerSent { + err = b.sendBulkCommand(b.ctx) + if err != nil { + return + } + } + + if len(row) != len(b.bulkColumns) { + return fmt.Errorf("Row does not have the same number of columns than the destination table %d %d", + len(row), len(b.bulkColumns)) + } + + bytes, err := b.makeRowData(row) + if err != nil { + return + } + + _, err = b.cn.sess.buf.Write(bytes) + if err != nil { + return + } + + b.numRows = b.numRows + 1 + return +} + +func (b *Bulk) makeRowData(row []interface{}) ([]byte, error) { + buf := new(bytes.Buffer) + buf.WriteByte(byte(tokenRow)) + + var logcol bytes.Buffer + for i, col := range b.bulkColumns { + + if b.Debug { + logcol.WriteString(fmt.Sprintf(" col[%d]='%v' ", i, row[i])) + } + param, err := b.makeParam(row[i], col) + if err != nil { + return nil, fmt.Errorf("bulkcopy: %s", err.Error()) + } + + if col.ti.Writer == nil { + return nil, fmt.Errorf("no writer for column: %s, TypeId: %#x", + col.ColName, col.ti.TypeId) + } + err = col.ti.Writer(buf, param.ti, param.buffer) + if err != nil { + return nil, fmt.Errorf("bulkcopy: %s", err.Error()) + } + } + + b.dlogf("row[%d] %s\n", b.numRows, logcol.String()) + + return buf.Bytes(), nil +} + +func (b *Bulk) Done() (rowcount int64, err error) { + if b.headerSent == false { + //no rows had been sent + return 0, nil + } + var buf = b.cn.sess.buf + buf.WriteByte(byte(tokenDone)) + + binary.Write(buf, binary.LittleEndian, uint16(doneFinal)) + binary.Write(buf, binary.LittleEndian, uint16(0)) // curcmd + + if b.cn.sess.loginAck.TDSVersion >= verTDS72 { + binary.Write(buf, binary.LittleEndian, uint64(0)) //rowcount 0 + } else { + binary.Write(buf, binary.LittleEndian, uint32(0)) //rowcount 0 + } + + buf.FinishPacket() + + tokchan := make(chan tokenStruct, 5) + go processResponse(b.ctx, b.cn.sess, tokchan, nil) + + var rowCount int64 + for token := range tokchan { + switch token := token.(type) { + case doneStruct: + if token.Status&doneCount != 0 { + rowCount = int64(token.RowCount) + } + if token.isError() { + return 0, token.getError() + } + case error: + return 0, b.cn.checkBadConn(token) + } + } + return rowCount, nil +} + +func (b *Bulk) createColMetadata() []byte { + buf := new(bytes.Buffer) + buf.WriteByte(byte(tokenColMetadata)) // token + binary.Write(buf, binary.LittleEndian, uint16(len(b.bulkColumns))) // column count + + for i, col := range b.bulkColumns { + + if b.cn.sess.loginAck.TDSVersion >= verTDS72 { + binary.Write(buf, binary.LittleEndian, uint32(col.UserType)) // usertype, always 0? + } else { + binary.Write(buf, binary.LittleEndian, uint16(col.UserType)) + } + binary.Write(buf, binary.LittleEndian, uint16(col.Flags)) + + writeTypeInfo(buf, &b.bulkColumns[i].ti) + + if col.ti.TypeId == typeNText || + col.ti.TypeId == typeText || + col.ti.TypeId == typeImage { + + tablename_ucs2 := str2ucs2(b.tablename) + binary.Write(buf, binary.LittleEndian, uint16(len(tablename_ucs2)/2)) + buf.Write(tablename_ucs2) + } + colname_ucs2 := str2ucs2(col.ColName) + buf.WriteByte(uint8(len(colname_ucs2) / 2)) + buf.Write(colname_ucs2) + } + + return buf.Bytes() +} + +func (b *Bulk) getMetadata(ctx context.Context) (err error) { + stmt, err := b.cn.prepareContext(ctx, "SET FMTONLY ON") + if err != nil { + return + } + + _, err = stmt.ExecContext(ctx, nil) + if err != nil { + return + } + + // Get columns info. 
+ stmt, err = b.cn.prepareContext(ctx, fmt.Sprintf("select * from %s SET FMTONLY OFF", b.tablename)) + if err != nil { + return + } + rows, err := stmt.QueryContext(ctx, nil) + if err != nil { + return fmt.Errorf("get columns info failed: %v", err) + } + b.metadata = rows.(*Rows).cols + + if b.Debug { + for _, col := range b.metadata { + b.dlogf("col: %s typeId: %#x size: %d scale: %d prec: %d flags: %d lcid: %#x\n", + col.ColName, col.ti.TypeId, col.ti.Size, col.ti.Scale, col.ti.Prec, + col.Flags, col.ti.Collation.LcidAndFlags) + } + } + + return rows.Close() +} + +func (b *Bulk) makeParam(val DataValue, col columnStruct) (res param, err error) { + res.ti.Size = col.ti.Size + res.ti.TypeId = col.ti.TypeId + + if val == nil { + res.ti.Size = 0 + return + } + + switch col.ti.TypeId { + + case typeInt1, typeInt2, typeInt4, typeInt8, typeIntN: + var intvalue int64 + + switch val := val.(type) { + case int: + intvalue = int64(val) + case int32: + intvalue = int64(val) + case int64: + intvalue = val + default: + err = fmt.Errorf("mssql: invalid type for int column") + return + } + + res.buffer = make([]byte, res.ti.Size) + if col.ti.Size == 1 { + res.buffer[0] = byte(intvalue) + } else if col.ti.Size == 2 { + binary.LittleEndian.PutUint16(res.buffer, uint16(intvalue)) + } else if col.ti.Size == 4 { + binary.LittleEndian.PutUint32(res.buffer, uint32(intvalue)) + } else if col.ti.Size == 8 { + binary.LittleEndian.PutUint64(res.buffer, uint64(intvalue)) + } + case typeFlt4, typeFlt8, typeFltN: + var floatvalue float64 + + switch val := val.(type) { + case float32: + floatvalue = float64(val) + case float64: + floatvalue = val + case int: + floatvalue = float64(val) + case int64: + floatvalue = float64(val) + default: + err = fmt.Errorf("mssql: invalid type for float column: %s", val) + return + } + + if col.ti.Size == 4 { + res.buffer = make([]byte, 4) + binary.LittleEndian.PutUint32(res.buffer, math.Float32bits(float32(floatvalue))) + } else if col.ti.Size == 8 { + res.buffer = make([]byte, 8) + binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(floatvalue)) + } + case typeNVarChar, typeNText, typeNChar: + + switch val := val.(type) { + case string: + res.buffer = str2ucs2(val) + case []byte: + res.buffer = val + default: + err = fmt.Errorf("mssql: invalid type for nvarchar column: %s", val) + return + } + res.ti.Size = len(res.buffer) + + case typeVarChar, typeBigVarChar, typeText, typeChar, typeBigChar: + switch val := val.(type) { + case string: + res.buffer = []byte(val) + case []byte: + res.buffer = val + default: + err = fmt.Errorf("mssql: invalid type for varchar column: %s", val) + return + } + res.ti.Size = len(res.buffer) + + case typeBit, typeBitN: + if reflect.TypeOf(val).Kind() != reflect.Bool { + err = fmt.Errorf("mssql: invalid type for bit column: %s", val) + return + } + res.ti.TypeId = typeBitN + res.ti.Size = 1 + res.buffer = make([]byte, 1) + if val.(bool) { + res.buffer[0] = 1 + } + case typeDateTime2N: + switch val := val.(type) { + case time.Time: + res.buffer = encodeDateTime2(val, int(col.ti.Scale)) + res.ti.Size = len(res.buffer) + default: + err = fmt.Errorf("mssql: invalid type for datetime2 column: %s", val) + return + } + case typeDateTimeOffsetN: + switch val := val.(type) { + case time.Time: + res.buffer = encodeDateTimeOffset(val, int(res.ti.Scale)) + res.ti.Size = len(res.buffer) + + default: + err = fmt.Errorf("mssql: invalid type for datetimeoffset column: %s", val) + return + } + case typeDateN: + switch val := val.(type) { + case time.Time: + 
res.buffer = encodeDate(val) + res.ti.Size = len(res.buffer) + default: + err = fmt.Errorf("mssql: invalid type for date column: %s", val) + return + } + case typeDateTime, typeDateTimeN, typeDateTim4: + switch val := val.(type) { + case time.Time: + if col.ti.Size == 4 { + res.buffer = encodeDateTim4(val) + res.ti.Size = len(res.buffer) + } else if col.ti.Size == 8 { + res.buffer = encodeDateTime(val) + res.ti.Size = len(res.buffer) + } else { + err = fmt.Errorf("mssql: invalid size of column") + } + + default: + err = fmt.Errorf("mssql: invalid type for datetime column: %s", val) + } + + // case typeMoney, typeMoney4, typeMoneyN: + case typeDecimal, typeDecimalN, typeNumeric, typeNumericN: + var value float64 + switch v := val.(type) { + case int: + value = float64(v) + case int8: + value = float64(v) + case int16: + value = float64(v) + case int32: + value = float64(v) + case int64: + value = float64(v) + case float32: + value = float64(v) + case float64: + value = v + case string: + if value, err = strconv.ParseFloat(v, 64); err != nil { + return res, fmt.Errorf("bulk: unable to convert string to float: %v", err) + } + default: + return res, fmt.Errorf("unknown value for decimal: %#v", v) + } + + perc := col.ti.Prec + scale := col.ti.Scale + var dec Decimal + dec, err = Float64ToDecimalScale(value, scale) + if err != nil { + return res, err + } + dec.prec = perc + + var length byte + switch { + case perc <= 9: + length = 4 + case perc <= 19: + length = 8 + case perc <= 28: + length = 12 + default: + length = 16 + } + + buf := make([]byte, length+1) + // first byte length written by typeInfo.writer + res.ti.Size = int(length) + 1 + // second byte sign + if value < 0 { + buf[0] = 0 + } else { + buf[0] = 1 + } + + ub := dec.UnscaledBytes() + l := len(ub) + if l > int(length) { + err = fmt.Errorf("decimal out of range: %s", dec) + return res, err + } + // reverse the bytes + for i, j := 1, l-1; j >= 0; i, j = i+1, j-1 { + buf[i] = ub[j] + } + res.buffer = buf + case typeBigVarBin, typeBigBinary: + switch val := val.(type) { + case []byte: + res.ti.Size = len(val) + res.buffer = val + default: + err = fmt.Errorf("mssql: invalid type for Binary column: %s", val) + return + } + case typeGuid: + switch val := val.(type) { + case []byte: + res.ti.Size = len(val) + res.buffer = val + default: + err = fmt.Errorf("mssql: invalid type for Guid column: %s", val) + return + } + + default: + err = fmt.Errorf("mssql: type %x not implemented", col.ti.TypeId) + } + return + +} + +func (b *Bulk) dlogf(format string, v ...interface{}) { + if b.Debug { + b.cn.sess.log.Printf(format, v...) 
+ } +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go new file mode 100644 index 0000000000000..709505b2a06a7 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go @@ -0,0 +1,93 @@ +package mssql + +import ( + "context" + "database/sql/driver" + "encoding/json" + "errors" +) + +type copyin struct { + cn *Conn + bulkcopy *Bulk + closed bool +} + +type serializableBulkConfig struct { + TableName string + ColumnsName []string + Options BulkOptions +} + +func (d *Driver) OpenConnection(dsn string) (*Conn, error) { + return d.open(context.Background(), dsn) +} + +func (c *Conn) prepareCopyIn(ctx context.Context, query string) (_ driver.Stmt, err error) { + config_json := query[11:] + + bulkconfig := serializableBulkConfig{} + err = json.Unmarshal([]byte(config_json), &bulkconfig) + if err != nil { + return + } + + bulkcopy := c.CreateBulkContext(ctx, bulkconfig.TableName, bulkconfig.ColumnsName) + bulkcopy.Options = bulkconfig.Options + + ci := ©in{ + cn: c, + bulkcopy: bulkcopy, + } + + return ci, nil +} + +func CopyIn(table string, options BulkOptions, columns ...string) string { + bulkconfig := &serializableBulkConfig{TableName: table, Options: options, ColumnsName: columns} + + config_json, err := json.Marshal(bulkconfig) + if err != nil { + panic(err) + } + + stmt := "INSERTBULK " + string(config_json) + + return stmt +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + panic("should never be called") +} + +func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errors.New("copyin query is closed") + } + + if len(v) == 0 { + rowCount, err := ci.bulkcopy.Done() + ci.closed = true + return driver.RowsAffected(rowCount), err + } + + t := make([]interface{}, len(v)) + for i, val := range v { + t[i] = val + } + + err = ci.bulkcopy.AddRow(t) + if err != nil { + return + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + return nil +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/collation.go b/vendor/github.com/denisenkom/go-mssqldb/collation.go deleted file mode 100644 index ac9cf20b7b051..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/collation.go +++ /dev/null @@ -1,39 +0,0 @@ -package mssql - -import ( - "encoding/binary" - "io" -) - -// http://msdn.microsoft.com/en-us/library/dd340437.aspx - -type collation struct { - lcidAndFlags uint32 - sortId uint8 -} - -func (c collation) getLcid() uint32 { - return c.lcidAndFlags & 0x000fffff -} - -func (c collation) getFlags() uint32 { - return (c.lcidAndFlags & 0x0ff00000) >> 20 -} - -func (c collation) getVersion() uint32 { - return (c.lcidAndFlags & 0xf0000000) >> 28 -} - -func readCollation(r *tdsBuffer) (res collation) { - res.lcidAndFlags = r.uint32() - res.sortId = r.byte() - return -} - -func writeCollation(w io.Writer, col collation) (err error) { - if err = binary.Write(w, binary.LittleEndian, col.lcidAndFlags); err != nil { - return - } - err = binary.Write(w, binary.LittleEndian, col.sortId) - return -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/convert.go b/vendor/github.com/denisenkom/go-mssqldb/convert.go new file mode 100644 index 0000000000000..51bd4ee3ac73b --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/convert.go @@ -0,0 +1,306 @@ +package mssql + +import "errors" + +// Copyright 2011 
The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Type conversions for Scan. + +// This file was imported from database.sql.convert for go 1.10.3 with minor modifications to get +// convertAssign function +// This function is used internally by sql to convert values during call to Scan, we need same +// logic to return values for OUTPUT parameters. +// TODO: sql library should instead expose function defaultCheckNamedValue to be callable by drivers + +import ( + "database/sql" + "database/sql/driver" + "fmt" + "reflect" + "strconv" + "time" +) + +var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error + +// convertAssign copies to dest the value in src, converting it if possible. +// An error is returned if the copy would result in loss of information. +// dest should be a pointer type. +func convertAssign(dest, src interface{}) error { + // Common cases, without reflect. + switch s := src.(type) { + case string: + switch d := dest.(type) { + case *string: + if d == nil { + return errNilPtr + } + *d = s + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = []byte(s) + return nil + case *sql.RawBytes: + if d == nil { + return errNilPtr + } + *d = append((*d)[:0], s...) + return nil + } + case []byte: + switch d := dest.(type) { + case *string: + if d == nil { + return errNilPtr + } + *d = string(s) + return nil + case *interface{}: + if d == nil { + return errNilPtr + } + *d = cloneBytes(s) + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = cloneBytes(s) + return nil + case *sql.RawBytes: + if d == nil { + return errNilPtr + } + *d = s + return nil + } + case time.Time: + switch d := dest.(type) { + case *time.Time: + *d = s + return nil + case *string: + *d = s.Format(time.RFC3339Nano) + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = []byte(s.Format(time.RFC3339Nano)) + return nil + case *sql.RawBytes: + if d == nil { + return errNilPtr + } + *d = s.AppendFormat((*d)[:0], time.RFC3339Nano) + return nil + } + case nil: + switch d := dest.(type) { + case *interface{}: + if d == nil { + return errNilPtr + } + *d = nil + return nil + case *[]byte: + if d == nil { + return errNilPtr + } + *d = nil + return nil + case *sql.RawBytes: + if d == nil { + return errNilPtr + } + *d = nil + return nil + } + } + + var sv reflect.Value + + switch d := dest.(type) { + case *string: + sv = reflect.ValueOf(src) + switch sv.Kind() { + case reflect.Bool, + reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, + reflect.Float32, reflect.Float64: + *d = asString(src) + return nil + } + case *[]byte: + sv = reflect.ValueOf(src) + if b, ok := asBytes(nil, sv); ok { + *d = b + return nil + } + case *sql.RawBytes: + sv = reflect.ValueOf(src) + if b, ok := asBytes([]byte(*d)[:0], sv); ok { + *d = sql.RawBytes(b) + return nil + } + case *bool: + bv, err := driver.Bool.ConvertValue(src) + if err == nil { + *d = bv.(bool) + } + return err + case *interface{}: + *d = src + return nil + } + + if scanner, ok := dest.(sql.Scanner); ok { + return scanner.Scan(src) + } + + dpv := reflect.ValueOf(dest) + if dpv.Kind() != reflect.Ptr { + return errors.New("destination not a pointer") + } + if dpv.IsNil() { + return errNilPtr + } + + if !sv.IsValid() { + sv = reflect.ValueOf(src) + } + + dv := reflect.Indirect(dpv) + if sv.IsValid() 
&& sv.Type().AssignableTo(dv.Type()) { + switch b := src.(type) { + case []byte: + dv.Set(reflect.ValueOf(cloneBytes(b))) + default: + dv.Set(sv) + } + return nil + } + + if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) { + dv.Set(sv.Convert(dv.Type())) + return nil + } + + // The following conversions use a string value as an intermediate representation + // to convert between various numeric types. + // + // This also allows scanning into user defined types such as "type Int int64". + // For symmetry, also check for string destination types. + switch dv.Kind() { + case reflect.Ptr: + if src == nil { + dv.Set(reflect.Zero(dv.Type())) + return nil + } else { + dv.Set(reflect.New(dv.Type().Elem())) + return convertAssign(dv.Interface(), src) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + s := asString(src) + i64, err := strconv.ParseInt(s, 10, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetInt(i64) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + s := asString(src) + u64, err := strconv.ParseUint(s, 10, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetUint(u64) + return nil + case reflect.Float32, reflect.Float64: + s := asString(src) + f64, err := strconv.ParseFloat(s, dv.Type().Bits()) + if err != nil { + err = strconvErr(err) + return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) + } + dv.SetFloat(f64) + return nil + case reflect.String: + switch v := src.(type) { + case string: + dv.SetString(v) + return nil + case []byte: + dv.SetString(string(v)) + return nil + } + } + + return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest) +} + +func strconvErr(err error) error { + if ne, ok := err.(*strconv.NumError); ok { + return ne.Err + } + return err +} + +func cloneBytes(b []byte) []byte { + if b == nil { + return nil + } else { + c := make([]byte, len(b)) + copy(c, b) + return c + } +} + +func asString(src interface{}) string { + switch v := src.(type) { + case string: + return v + case []byte: + return string(v) + } + rv := reflect.ValueOf(src) + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(rv.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.FormatUint(rv.Uint(), 10) + case reflect.Float64: + return strconv.FormatFloat(rv.Float(), 'g', -1, 64) + case reflect.Float32: + return strconv.FormatFloat(rv.Float(), 'g', -1, 32) + case reflect.Bool: + return strconv.FormatBool(rv.Bool()) + } + return fmt.Sprintf("%v", src) +} + +func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) { + switch rv.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.AppendInt(buf, rv.Int(), 10), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return strconv.AppendUint(buf, rv.Uint(), 10), true + case reflect.Float32: + return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true + case reflect.Float64: + return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true + case reflect.Bool: + return strconv.AppendBool(buf, rv.Bool()), true + case reflect.String: + 
s := rv.String() + return append(buf, s...), true + } + return +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/decimal.go b/vendor/github.com/denisenkom/go-mssqldb/decimal.go index 76f3a6b5b49e4..372f64b4eb148 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/decimal.go +++ b/vendor/github.com/denisenkom/go-mssqldb/decimal.go @@ -32,7 +32,13 @@ func (d Decimal) ToFloat64() float64 { return val } +const autoScale = 100 + func Float64ToDecimal(f float64) (Decimal, error) { + return Float64ToDecimalScale(f, autoScale) +} + +func Float64ToDecimalScale(f float64, scale uint8) (Decimal, error) { var dec Decimal if math.IsNaN(f) { return dec, errors.New("NaN") @@ -49,10 +55,10 @@ func Float64ToDecimal(f float64) (Decimal, error) { } dec.prec = 20 var integer float64 - for dec.scale = 0; dec.scale <= 20; dec.scale++ { + for dec.scale = 0; dec.scale <= scale; dec.scale++ { integer = f * scaletblflt64[dec.scale] _, frac := math.Modf(integer) - if frac == 0 { + if frac == 0 && scale == autoScale { break } } @@ -73,7 +79,7 @@ func init() { } } -func (d Decimal) Bytes() []byte { +func (d Decimal) BigInt() big.Int { bytes := make([]byte, 16) binary.BigEndian.PutUint32(bytes[0:4], d.integer[3]) binary.BigEndian.PutUint32(bytes[4:8], d.integer[2]) @@ -84,9 +90,19 @@ func (d Decimal) Bytes() []byte { if !d.positive { x.Neg(&x) } + return x +} + +func (d Decimal) Bytes() []byte { + x := d.BigInt() return scaleBytes(x.String(), d.scale) } +func (d Decimal) UnscaledBytes() []byte { + x := d.BigInt() + return x.Bytes() +} + func scaleBytes(s string, scale uint8) []byte { z := make([]byte, 0, len(s)+1) if s[0] == '-' || s[0] == '+' { diff --git a/vendor/github.com/denisenkom/go-mssqldb/doc.go b/vendor/github.com/denisenkom/go-mssqldb/doc.go new file mode 100644 index 0000000000000..2e54929c572f9 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/doc.go @@ -0,0 +1,14 @@ +// package mssql implements the TDS protocol used to connect to MS SQL Server (sqlserver) +// database servers. +// +// This package registers the driver: +// sqlserver: uses native "@" parameter placeholder names and does no pre-processing. +// +// If the ordinal position is used for query parameters, identifiers will be named +// "@p1", "@p2", ... "@pN". +// +// Please refer to the README for the format of the DSN. There are multiple DSN +// formats accepted: ADO style, ODBC style, and URL style. 
The following is an +// example of a URL style DSN: +// sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30 +package mssql diff --git a/vendor/github.com/denisenkom/go-mssqldb/charset.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/charset.go similarity index 94% rename from vendor/github.com/denisenkom/go-mssqldb/charset.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/charset.go index f1cc247a9d6f1..8dc2279ea4833 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/charset.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/charset.go @@ -1,14 +1,14 @@ -package mssql +package cp type charsetMap struct { sb [256]rune // single byte runes, -1 for a double byte character lead byte db map[int]rune // double byte runes } -func collation2charset(col collation) *charsetMap { +func collation2charset(col Collation) *charsetMap { // http://msdn.microsoft.com/en-us/library/ms144250.aspx // http://msdn.microsoft.com/en-us/library/ms144250(v=sql.105).aspx - switch col.sortId { + switch col.SortId { case 30, 31, 32, 33, 34: return cp437 case 40, 41, 42, 44, 49, 55, 56, 57, 58, 59, 60, 61: @@ -86,7 +86,7 @@ func collation2charset(col collation) *charsetMap { return cp1252 } -func charset2utf8(col collation, s []byte) string { +func CharsetToUTF8(col Collation, s []byte) string { cm := collation2charset(col) if cm == nil { return string(s) diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go new file mode 100644 index 0000000000000..ae7b03bf137eb --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go @@ -0,0 +1,20 @@ +package cp + +// http://msdn.microsoft.com/en-us/library/dd340437.aspx + +type Collation struct { + LcidAndFlags uint32 + SortId uint8 +} + +func (c Collation) getLcid() uint32 { + return c.LcidAndFlags & 0x000fffff +} + +func (c Collation) getFlags() uint32 { + return (c.LcidAndFlags & 0x0ff00000) >> 20 +} + +func (c Collation) getVersion() uint32 { + return (c.LcidAndFlags & 0xf0000000) >> 28 +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1250.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1250.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1250.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1250.go index 8207366be764b..5c8094ec3cc83 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1250.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1250.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1250 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1251.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1251.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1251.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1251.go index f5b81c3934cf4..dc5896770ca15 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1251.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1251.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1251 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1252.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1252.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1252.go rename to 
vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1252.go index ed705d35a7a27..5ae8703542f2e 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1252.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1252.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1252 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1253.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1253.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1253.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1253.go index cb1e1a7623695..52c8e07aa69ec 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1253.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1253.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1253 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1254.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1254.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1254.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1254.go index a4b09bb44f54c..5d8864a521fe7 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1254.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1254.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1254 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1255.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1255.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1255.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1255.go index 97f9ee9e91330..60619895d92cc 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1255.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1255.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1255 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1256.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1256.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1256.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1256.go index e91241b4489ec..ffd04b3e5bb95 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1256.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1256.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1256 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1257.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1257.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1257.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1257.go index bd93e6f891a84..492da72ea4d03 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp1257.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1257.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1257 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp1258.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1258.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp1258.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1258.go index 4e1f8ac9438f5..80be52c596645 100644 
--- a/vendor/github.com/denisenkom/go-mssqldb/cp1258.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1258.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp1258 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp437.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp437.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp437.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp437.go index f47f8ecc77b41..76dedfb8ef53e 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp437.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp437.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp437 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp850.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp850.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp850.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp850.go index e6b3d16904462..927ab249efa71 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp850.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp850.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp850 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp874.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp874.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp874.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp874.go index 9d691a1a59572..723bf6c3926ab 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp874.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp874.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp874 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp932.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp932.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp932.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp932.go index 980c55d815f60..5fc1377424a85 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp932.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp932.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp932 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp936.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp936.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp936.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp936.go index fca5da76d4d09..d1fac12e26bbd 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp936.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp936.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp936 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp949.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp949.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp949.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp949.go index cddfcbc852261..52c708dfa5cf3 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp949.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp949.go @@ -1,4 +1,4 @@ 
-package mssql +package cp var cp949 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/cp950.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp950.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/cp950.go rename to vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp950.go index cbf25cb91a8c8..1301cd0f05274 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/cp950.go +++ b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp950.go @@ -1,4 +1,4 @@ -package mssql +package cp var cp950 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/log.go b/vendor/github.com/denisenkom/go-mssqldb/log.go index f350aed09988e..9b8c551e88d92 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/log.go +++ b/vendor/github.com/denisenkom/go-mssqldb/log.go @@ -4,19 +4,26 @@ import ( "log" ) -type Logger log.Logger +type Logger interface { + Printf(format string, v ...interface{}) + Println(v ...interface{}) +} + +type optionalLogger struct { + logger Logger +} -func (logger *Logger) Printf(format string, v ...interface{}) { - if logger != nil { - (*log.Logger)(logger).Printf(format, v...) +func (o optionalLogger) Printf(format string, v ...interface{}) { + if o.logger != nil { + o.logger.Printf(format, v...) } else { log.Printf(format, v...) } } -func (logger *Logger) Println(v ...interface{}) { - if logger != nil { - (*log.Logger)(logger).Println(v...) +func (o optionalLogger) Println(v ...interface{}) { + if o.logger != nil { + o.logger.Println(v...) } else { log.Println(v...) } diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql.go b/vendor/github.com/denisenkom/go-mssqldb/mssql.go index 9663651e7c60c..9065da53dea51 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql.go +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql.go @@ -1,122 +1,331 @@ package mssql import ( + "context" "database/sql" "database/sql/driver" "encoding/binary" "errors" "fmt" "io" - "log" "math" "net" + "reflect" "strings" "time" + "unicode" ) +// ReturnStatus may be used to return the return value from a proc. +// +// var rs mssql.ReturnStatus +// _, err := db.Exec("theproc", &rs) +// log.Printf("return status = %d", rs) +type ReturnStatus int32 + +var driverInstance = &Driver{processQueryText: true} +var driverInstanceNoProcess = &Driver{processQueryText: false} + func init() { - sql.Register("mssql", &MssqlDriver{}) + sql.Register("mssql", driverInstance) + sql.Register("sqlserver", driverInstanceNoProcess) + createDialer = func(p *connectParams) Dialer { + return netDialer{&net.Dialer{KeepAlive: p.keepAlive}} + } } -type MssqlDriver struct { - log *log.Logger +var createDialer func(p *connectParams) Dialer + +type netDialer struct { + nd *net.Dialer } -func (d *MssqlDriver) SetLogger(logger *log.Logger) { - d.log = logger +func (d netDialer) DialContext(ctx context.Context, network string, addr string) (net.Conn, error) { + return d.nd.DialContext(ctx, network, addr) } -func CheckBadConn(err error) error { - if err == io.EOF { - return driver.ErrBadConn +type Driver struct { + log optionalLogger + + processQueryText bool +} + +// OpenConnector opens a new connector. Useful to dial with a context. 
+func (d *Driver) OpenConnector(dsn string) (*Connector, error) { + params, err := parseConnectParams(dsn) + if err != nil { + return nil, err } + return &Connector{ + params: params, + driver: d, + }, nil +} - switch e := err.(type) { - case net.Error: - if e.Timeout() { - return e - } - return driver.ErrBadConn - default: - return err +func (d *Driver) Open(dsn string) (driver.Conn, error) { + return d.open(context.Background(), dsn) +} + +func SetLogger(logger Logger) { + driverInstance.SetLogger(logger) + driverInstanceNoProcess.SetLogger(logger) +} + +func (d *Driver) SetLogger(logger Logger) { + d.log = optionalLogger{logger} +} + +// NewConnector creates a new connector from a DSN. +// The returned connector may be used with sql.OpenDB. +func NewConnector(dsn string) (*Connector, error) { + params, err := parseConnectParams(dsn) + if err != nil { + return nil, err + } + c := &Connector{ + params: params, + driver: driverInstanceNoProcess, } + return c, nil } -type MssqlConn struct { - sess *tdsSession +// Connector holds the parsed DSN and is ready to make a new connection +// at any time. +// +// In the future, settings that cannot be passed through a string DSN +// may be set directly on the connector. +type Connector struct { + params connectParams + driver *Driver + + // SessionInitSQL is executed after marking a given session to be reset. + // When not present, the next query will still reset the session to the + // database defaults. + // + // When present the connection will immediately mark the session to + // be reset, then execute the SessionInitSQL text to setup the session + // that may be different from the base database defaults. + // + // For Example, the application relies on the following defaults + // but is not allowed to set them at the database system level. + // + // SET XACT_ABORT ON; + // SET TEXTSIZE -1; + // SET ANSI_NULLS ON; + // SET LOCK_TIMEOUT 10000; + // + // SessionInitSQL should not attempt to manually call sp_reset_connection. + // This will happen at the TDS layer. + // + // SessionInitSQL is optional. The session will be reset even if + // SessionInitSQL is empty. + SessionInitSQL string + + // Dialer sets a custom dialer for all network operations. + // If Dialer is not set, normal net dialers are used. 
+ Dialer Dialer } -func (c *MssqlConn) Commit() error { - headers := []headerStruct{ - {hdrtype: dataStmHdrTransDescr, - data: transDescrHdr{c.sess.tranid, 1}.pack()}, +type Dialer interface { + DialContext(ctx context.Context, network string, addr string) (net.Conn, error) +} + +func (c *Connector) getDialer(p *connectParams) Dialer { + if c != nil && c.Dialer != nil { + return c.Dialer + } + return createDialer(p) +} + +type Conn struct { + connector *Connector + sess *tdsSession + transactionCtx context.Context + resetSession bool + + processQueryText bool + connectionGood bool + + outs map[string]interface{} + returnStatus *ReturnStatus +} + +func (c *Conn) setReturnStatus(s ReturnStatus) { + if c.returnStatus == nil { + return } - if err := sendCommitXact(c.sess.buf, headers, "", 0, 0, ""); err != nil { + *c.returnStatus = s +} + +func (c *Conn) checkBadConn(err error) error { + // this is a hack to address Issue #275 + // we set connectionGood flag to false if + // error indicates that connection is not usable + // but we return actual error instead of ErrBadConn + // this will cause connection to stay in a pool + // but next request to this connection will return ErrBadConn + + // it might be possible to revise this hack after + // https://github.com/golang/go/issues/20807 + // is implemented + switch err { + case nil: + return nil + case io.EOF: + c.connectionGood = false + return driver.ErrBadConn + case driver.ErrBadConn: + // It is an internal programming error if driver.ErrBadConn + // is ever passed to this function. driver.ErrBadConn should + // only ever be returned in response to a *mssql.Conn.connectionGood == false + // check in the external facing API. + panic("driver.ErrBadConn in checkBadConn. This should not happen.") + } + + switch err.(type) { + case net.Error: + c.connectionGood = false + return err + case StreamError: + c.connectionGood = false + return err + default: return err } +} + +func (c *Conn) clearOuts() { + c.outs = nil +} +func (c *Conn) simpleProcessResp(ctx context.Context) error { tokchan := make(chan tokenStruct, 5) - go processResponse(c.sess, tokchan) + go processResponse(ctx, c.sess, tokchan, c.outs) + c.clearOuts() for tok := range tokchan { switch token := tok.(type) { + case doneStruct: + if token.isError() { + return c.checkBadConn(token.getError()) + } case error: - return token + return c.checkBadConn(token) } } return nil } -func (c *MssqlConn) Rollback() error { +func (c *Conn) Commit() error { + if !c.connectionGood { + return driver.ErrBadConn + } + if err := c.sendCommitRequest(); err != nil { + return c.checkBadConn(err) + } + return c.simpleProcessResp(c.transactionCtx) +} + +func (c *Conn) sendCommitRequest() error { headers := []headerStruct{ {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{c.sess.tranid, 1}.pack()}, } - if err := sendRollbackXact(c.sess.buf, headers, "", 0, 0, ""); err != nil { - return err + reset := c.resetSession + c.resetSession = false + if err := sendCommitXact(c.sess.buf, headers, "", 0, 0, "", reset); err != nil { + if c.sess.logFlags&logErrors != 0 { + c.sess.log.Printf("Failed to send CommitXact with %v", err) + } + c.connectionGood = false + return fmt.Errorf("Faild to send CommitXact: %v", err) } + return nil +} - tokchan := make(chan tokenStruct, 5) - go processResponse(c.sess, tokchan) - for tok := range tokchan { - switch token := tok.(type) { - case error: - return token +func (c *Conn) Rollback() error { + if !c.connectionGood { + return driver.ErrBadConn + } + if err := 
c.sendRollbackRequest(); err != nil { + return c.checkBadConn(err) + } + return c.simpleProcessResp(c.transactionCtx) +} + +func (c *Conn) sendRollbackRequest() error { + headers := []headerStruct{ + {hdrtype: dataStmHdrTransDescr, + data: transDescrHdr{c.sess.tranid, 1}.pack()}, + } + reset := c.resetSession + c.resetSession = false + if err := sendRollbackXact(c.sess.buf, headers, "", 0, 0, "", reset); err != nil { + if c.sess.logFlags&logErrors != 0 { + c.sess.log.Printf("Failed to send RollbackXact with %v", err) } + c.connectionGood = false + return fmt.Errorf("Failed to send RollbackXact: %v", err) } return nil } -func (c *MssqlConn) Begin() (driver.Tx, error) { +func (c *Conn) Begin() (driver.Tx, error) { + return c.begin(context.Background(), isolationUseCurrent) +} + +func (c *Conn) begin(ctx context.Context, tdsIsolation isoLevel) (tx driver.Tx, err error) { + if !c.connectionGood { + return nil, driver.ErrBadConn + } + err = c.sendBeginRequest(ctx, tdsIsolation) + if err != nil { + return nil, c.checkBadConn(err) + } + tx, err = c.processBeginResponse(ctx) + if err != nil { + return nil, c.checkBadConn(err) + } + return +} + +func (c *Conn) sendBeginRequest(ctx context.Context, tdsIsolation isoLevel) error { + c.transactionCtx = ctx headers := []headerStruct{ {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{0, 1}.pack()}, } - if err := sendBeginXact(c.sess.buf, headers, 0, ""); err != nil { - return nil, CheckBadConn(err) - } - tokchan := make(chan tokenStruct, 5) - go processResponse(c.sess, tokchan) - for tok := range tokchan { - switch token := tok.(type) { - case error: - if c.sess.tranid != 0 { - return nil, token - } - return nil, CheckBadConn(token) + reset := c.resetSession + c.resetSession = false + if err := sendBeginXact(c.sess.buf, headers, tdsIsolation, "", reset); err != nil { + if c.sess.logFlags&logErrors != 0 { + c.sess.log.Printf("Failed to send BeginXact with %v", err) } + c.connectionGood = false + return fmt.Errorf("Failed to send BeginXact: %v", err) + } + return nil +} + +func (c *Conn) processBeginResponse(ctx context.Context) (driver.Tx, error) { + if err := c.simpleProcessResp(ctx); err != nil { + return nil, err } // successful BEGINXACT request will return sess.tranid // for started transaction return c, nil } -func (d *MssqlDriver) Open(dsn string) (driver.Conn, error) { +func (d *Driver) open(ctx context.Context, dsn string) (*Conn, error) { params, err := parseConnectParams(dsn) if err != nil { return nil, err } + return d.connect(ctx, nil, params) +} - sess, err := connect(params) +// connect to the server, using the provided context for dialing only. 
+func (d *Driver) connect(ctx context.Context, c *Connector, params connectParams) (*Conn, error) { + sess, err := connect(ctx, c, d.log, params) if err != nil { // main server failed, try fail-over partner if params.failOverPartner == "" { @@ -128,24 +337,31 @@ func (d *MssqlDriver) Open(dsn string) (driver.Conn, error) { params.port = params.failOverPort } - sess, err = connect(params) + sess, err = connect(ctx, c, d.log, params) if err != nil { // fail-over partner also failed, now fail return nil, err } } - conn := &MssqlConn{sess} - conn.sess.log = (*Logger)(d.log) + conn := &Conn{ + connector: c, + sess: sess, + transactionCtx: context.Background(), + processQueryText: d.processQueryText, + connectionGood: true, + } + conn.sess.log = d.log + return conn, nil } -func (c *MssqlConn) Close() error { +func (c *Conn) Close() error { return c.sess.buf.transport.Close() } -type MssqlStmt struct { - c *MssqlConn +type Stmt struct { + c *Conn query string paramCount int notifSub *queryNotifSub @@ -157,16 +373,29 @@ type queryNotifSub struct { timeout uint32 } -func (c *MssqlConn) Prepare(query string) (driver.Stmt, error) { - q, paramCount := parseParams(query) - return &MssqlStmt{c, q, paramCount, nil}, nil +func (c *Conn) Prepare(query string) (driver.Stmt, error) { + if !c.connectionGood { + return nil, driver.ErrBadConn + } + if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") { + return c.prepareCopyIn(context.Background(), query) + } + return c.prepareContext(context.Background(), query) } -func (s *MssqlStmt) Close() error { +func (c *Conn) prepareContext(ctx context.Context, query string) (*Stmt, error) { + paramCount := -1 + if c.processQueryText { + query, paramCount = parseParams(query) + } + return &Stmt{c, query, paramCount, nil}, nil +} + +func (s *Stmt) Close() error { return nil } -func (s *MssqlStmt) SetQueryNotification(id, options string, timeout time.Duration) { +func (s *Stmt) SetQueryNotification(id, options string, timeout time.Duration) { to := uint32(timeout / time.Second) if to < 1 { to = 1 @@ -174,183 +403,326 @@ func (s *MssqlStmt) SetQueryNotification(id, options string, timeout time.Durati s.notifSub = &queryNotifSub{id, options, to} } -func (s *MssqlStmt) NumInput() int { +func (s *Stmt) NumInput() int { return s.paramCount } -func (s *MssqlStmt) sendQuery(args []driver.Value) (err error) { +func (s *Stmt) sendQuery(args []namedValue) (err error) { headers := []headerStruct{ {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{s.c.sess.tranid, 1}.pack()}, } if s.notifSub != nil { - headers = append(headers, headerStruct{hdrtype: dataStmHdrQueryNotif, - data: queryNotifHdr{s.notifSub.msgText, s.notifSub.options, s.notifSub.timeout}.pack()}) + headers = append(headers, + headerStruct{ + hdrtype: dataStmHdrQueryNotif, + data: queryNotifHdr{ + s.notifSub.msgText, + s.notifSub.options, + s.notifSub.timeout, + }.pack(), + }) } - if len(args) != s.paramCount { - return errors.New(fmt.Sprintf("sql: expected %d parameters, got %d", s.paramCount, len(args))) - } - if s.c.sess.logFlags&logSQL != 0 { - s.c.sess.log.Println(s.query) + conn := s.c + + // no need to check number of parameters here, it is checked by database/sql + if conn.sess.logFlags&logSQL != 0 { + conn.sess.log.Println(s.query) } - if s.c.sess.logFlags&logParams != 0 && len(args) > 0 { + if conn.sess.logFlags&logParams != 0 && len(args) > 0 { for i := 0; i < len(args); i++ { - s.c.sess.log.Printf("\t@p%d\t%v\n", i+1, args[i]) + if len(args[i].Name) > 0 { + s.c.sess.log.Printf("\t@%s\t%v\n", 
args[i].Name, args[i].Value) + } else { + s.c.sess.log.Printf("\t@p%d\t%v\n", i+1, args[i].Value) + } } - } + + reset := conn.resetSession + conn.resetSession = false if len(args) == 0 { - if err = sendSqlBatch72(s.c.sess.buf, s.query, headers); err != nil { - if s.c.sess.tranid != 0 { - return err + if err = sendSqlBatch72(conn.sess.buf, s.query, headers, reset); err != nil { + if conn.sess.logFlags&logErrors != 0 { + conn.sess.log.Printf("Failed to send SqlBatch with %v", err) } - return CheckBadConn(err) + conn.connectionGood = false + return fmt.Errorf("failed to send SQL Batch: %v", err) } } else { - params := make([]Param, len(args)+2) - decls := make([]string, len(args)) - params[0], err = s.makeParam(s.query) - if err != nil { - return - } - for i, val := range args { - params[i+2], err = s.makeParam(val) + proc := sp_ExecuteSql + var params []param + if isProc(s.query) { + proc.name = s.query + params, _, err = s.makeRPCParams(args, 0) if err != nil { return } - name := fmt.Sprintf("@p%d", i+1) - params[i+2].Name = name - decls[i] = fmt.Sprintf("%s %s", name, makeDecl(params[i+2].ti)) - } - params[1], err = s.makeParam(strings.Join(decls, ",")) - if err != nil { - return + } else { + var decls []string + params, decls, err = s.makeRPCParams(args, 2) + if err != nil { + return + } + params[0] = makeStrParam(s.query) + params[1] = makeStrParam(strings.Join(decls, ",")) } - if err = sendRpc(s.c.sess.buf, headers, Sp_ExecuteSql, 0, params); err != nil { - if s.c.sess.tranid != 0 { - return err + if err = sendRpc(conn.sess.buf, headers, proc, 0, params, reset); err != nil { + if conn.sess.logFlags&logErrors != 0 { + conn.sess.log.Printf("Failed to send Rpc with %v", err) } - return CheckBadConn(err) + conn.connectionGood = false + return fmt.Errorf("Failed to send RPC: %v", err) } } return } -func (s *MssqlStmt) Query(args []driver.Value) (res driver.Rows, err error) { +// isProc takes the query text in s and determines if it is a stored proc name +// or SQL text. +func isProc(s string) bool { + if len(s) == 0 { + return false + } + const ( + outside = iota + text + escaped + ) + st := outside + var rn1, rPrev rune + for _, r := range s { + rPrev = rn1 + rn1 = r + switch r { + // No newlines or string sequences. 
+ case '\n', '\r', '\'', ';': + return false + } + switch st { + case outside: + switch { + case unicode.IsSpace(r): + return false + case r == '[': + st = escaped + continue + case r == ']' && rPrev == ']': + st = escaped + continue + case unicode.IsLetter(r): + st = text + } + case text: + switch { + case r == '.': + st = outside + continue + case unicode.IsSpace(r): + return false + } + case escaped: + switch { + case r == ']': + st = outside + continue + } + } + } + return true +} + +func (s *Stmt) makeRPCParams(args []namedValue, offset int) ([]param, []string, error) { + var err error + params := make([]param, len(args)+offset) + decls := make([]string, len(args)) + for i, val := range args { + params[i+offset], err = s.makeParam(val.Value) + if err != nil { + return nil, nil, err + } + var name string + if len(val.Name) > 0 { + name = "@" + val.Name + } else { + name = fmt.Sprintf("@p%d", val.Ordinal) + } + params[i+offset].Name = name + decls[i] = fmt.Sprintf("%s %s", name, makeDecl(params[i+offset].ti)) + } + return params, decls, nil +} + +type namedValue struct { + Name string + Ordinal int + Value driver.Value +} + +func convertOldArgs(args []driver.Value) []namedValue { + list := make([]namedValue, len(args)) + for i, v := range args { + list[i] = namedValue{ + Ordinal: i + 1, + Value: v, + } + } + return list +} + +func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) { + return s.queryContext(context.Background(), convertOldArgs(args)) +} + +func (s *Stmt) queryContext(ctx context.Context, args []namedValue) (rows driver.Rows, err error) { + if !s.c.connectionGood { + return nil, driver.ErrBadConn + } if err = s.sendQuery(args); err != nil { - return + return nil, s.c.checkBadConn(err) } + return s.processQueryResponse(ctx) +} + +func (s *Stmt) processQueryResponse(ctx context.Context) (res driver.Rows, err error) { tokchan := make(chan tokenStruct, 5) - go processResponse(s.c.sess, tokchan) + ctx, cancel := context.WithCancel(ctx) + go processResponse(ctx, s.c.sess, tokchan, s.c.outs) + s.c.clearOuts() // process metadata - var cols []string + var cols []columnStruct loop: for tok := range tokchan { switch token := tok.(type) { - // by ignoring DONE token we effectively - // skip empty result-sets - // this improves results in queryes like that: + // By ignoring DONE token we effectively + // skip empty result-sets. 
+ // This improves results in queries like that: // set nocount on; select 1 // see TestIgnoreEmptyResults test //case doneStruct: //break loop case []columnStruct: - cols = make([]string, len(token)) - for i, col := range token { - cols[i] = col.ColName - } + cols = token break loop - case error: - if s.c.sess.tranid != 0 { - return nil, token + case doneStruct: + if token.isError() { + return nil, s.c.checkBadConn(token.getError()) } - return nil, CheckBadConn(token) + case ReturnStatus: + s.c.setReturnStatus(token) + case error: + return nil, s.c.checkBadConn(token) } } - return &MssqlRows{sess: s.c.sess, tokchan: tokchan, cols: cols}, nil + res = &Rows{stmt: s, tokchan: tokchan, cols: cols, cancel: cancel} + return } -func (s *MssqlStmt) Exec(args []driver.Value) (res driver.Result, err error) { +func (s *Stmt) Exec(args []driver.Value) (driver.Result, error) { + return s.exec(context.Background(), convertOldArgs(args)) +} + +func (s *Stmt) exec(ctx context.Context, args []namedValue) (res driver.Result, err error) { + if !s.c.connectionGood { + return nil, driver.ErrBadConn + } if err = s.sendQuery(args); err != nil { - return + return nil, s.c.checkBadConn(err) } + if res, err = s.processExec(ctx); err != nil { + return nil, s.c.checkBadConn(err) + } + return +} + +func (s *Stmt) processExec(ctx context.Context) (res driver.Result, err error) { tokchan := make(chan tokenStruct, 5) - go processResponse(s.c.sess, tokchan) + go processResponse(ctx, s.c.sess, tokchan, s.c.outs) + s.c.clearOuts() var rowCount int64 for token := range tokchan { switch token := token.(type) { case doneInProcStruct: if token.Status&doneCount != 0 { - rowCount = int64(token.RowCount) + rowCount += int64(token.RowCount) } case doneStruct: if token.Status&doneCount != 0 { - rowCount = int64(token.RowCount) - } - case error: - if s.c.sess.logFlags&logErrors != 0 { - s.c.sess.log.Println("got error:", token) + rowCount += int64(token.RowCount) } - if s.c.sess.tranid != 0 { - return nil, token + if token.isError() { + return nil, token.getError() } - return nil, CheckBadConn(token) + case ReturnStatus: + s.c.setReturnStatus(token) + case error: + return nil, token } } - return &MssqlResult{s.c, rowCount}, nil + return &Result{s.c, rowCount}, nil } -type MssqlRows struct { - sess *tdsSession - cols []string +type Rows struct { + stmt *Stmt + cols []columnStruct tokchan chan tokenStruct - nextCols []string + nextCols []columnStruct + + cancel func() } -func (rc *MssqlRows) Close() error { +func (rc *Rows) Close() error { + rc.cancel() for _ = range rc.tokchan { } rc.tokchan = nil return nil } -func (rc *MssqlRows) Columns() (res []string) { - return rc.cols +func (rc *Rows) Columns() (res []string) { + res = make([]string, len(rc.cols)) + for i, col := range rc.cols { + res[i] = col.ColName + } + return } -func (rc *MssqlRows) Next(dest []driver.Value) (err error) { +func (rc *Rows) Next(dest []driver.Value) error { + if !rc.stmt.c.connectionGood { + return driver.ErrBadConn + } if rc.nextCols != nil { return io.EOF } for tok := range rc.tokchan { switch tokdata := tok.(type) { case []columnStruct: - cols := make([]string, len(tokdata)) - for i, col := range tokdata { - cols[i] = col.ColName - } - rc.nextCols = cols + rc.nextCols = tokdata return io.EOF case []interface{}: for i := range dest { dest[i] = tokdata[i] } return nil + case doneStruct: + if tokdata.isError() { + return rc.stmt.c.checkBadConn(tokdata.getError()) + } case error: - return tokdata + return rc.stmt.c.checkBadConn(tokdata) } } return io.EOF } 
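A minimal usage sketch of the context-aware query path implemented above (queryContext / processQueryResponse), as exercised through database/sql. This is illustrative only and not part of the patch: the driver name "sqlserver", the DSN values, and the query text are assumptions; cancelling or timing out the context is what triggers the attention-signal handling added later in token.go.

// Sketch: querying SQL Server through database/sql with a cancellable context,
// assuming the go-mssqldb driver is registered under the name "sqlserver" and
// that the sqlserver:// URL DSN form added in this patch is used.
package main

import (
	"context"
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/denisenkom/go-mssqldb" // registers the driver
)

func main() {
	// Hypothetical DSN; user, password, host, port and database are placeholders.
	db, err := sql.Open("sqlserver", "sqlserver://sa:password@localhost:1433?database=master")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If this context expires or is cancelled, the driver sends a TDS
	// attention signal instead of abandoning the connection.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	rows, err := db.QueryContext(ctx, "SELECT name FROM sys.databases")
	if err != nil {
		log.Fatal(err)
	}
	defer rows.Close()

	for rows.Next() {
		var name string
		if err := rows.Scan(&name); err != nil {
			log.Fatal(err)
		}
		fmt.Println(name)
	}
	if err := rows.Err(); err != nil {
		log.Fatal(err)
	}
}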
-func (rc *MssqlRows) HasNextResultSet() bool { +func (rc *Rows) HasNextResultSet() bool { return rc.nextCols != nil } -func (rc *MssqlRows) NextResultSet() error { +func (rc *Rows) NextResultSet() error { rc.cols = rc.nextCols rc.nextCols = nil if rc.cols == nil { @@ -359,11 +731,69 @@ func (rc *MssqlRows) NextResultSet() error { return nil } -func (s *MssqlStmt) makeParam(val driver.Value) (res Param, err error) { +// It should return +// the value type that can be used to scan types into. For example, the database +// column type "bigint" this should return "reflect.TypeOf(int64(0))". +func (r *Rows) ColumnTypeScanType(index int) reflect.Type { + return makeGoLangScanType(r.cols[index].ti) +} + +// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the +// database system type name without the length. Type names should be uppercase. +// Examples of returned types: "VARCHAR", "NVARCHAR", "VARCHAR2", "CHAR", "TEXT", +// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML", +// "TIMESTAMP". +func (r *Rows) ColumnTypeDatabaseTypeName(index int) string { + return makeGoLangTypeName(r.cols[index].ti) +} + +// RowsColumnTypeLength may be implemented by Rows. It should return the length +// of the column type if the column is a variable length type. If the column is +// not a variable length type ok should return false. +// If length is not limited other than system limits, it should return math.MaxInt64. +// The following are examples of returned values for various types: +// TEXT (math.MaxInt64, true) +// varchar(10) (10, true) +// nvarchar(10) (10, true) +// decimal (0, false) +// int (0, false) +// bytea(30) (30, true) +func (r *Rows) ColumnTypeLength(index int) (int64, bool) { + return makeGoLangTypeLength(r.cols[index].ti) +} + +// It should return +// the precision and scale for decimal types. If not applicable, ok should be false. +// The following are examples of returned values for various types: +// decimal(38, 4) (38, 4, true) +// int (0, 0, false) +// decimal (math.MaxInt64, math.MaxInt64, true) +func (r *Rows) ColumnTypePrecisionScale(index int) (int64, int64, bool) { + return makeGoLangTypePrecisionScale(r.cols[index].ti) +} + +// The nullable value should +// be true if it is known the column may be null, or false if the column is known +// to be not nullable. +// If the column nullability is unknown, ok should be false. 
+func (r *Rows) ColumnTypeNullable(index int) (nullable, ok bool) { + nullable = r.cols[index].Flags&colFlagNullable != 0 + ok = true + return +} + +func makeStrParam(val string) (res param) { + res.ti.TypeId = typeNVarChar + res.buffer = str2ucs2(val) + res.ti.Size = len(res.buffer) + return +} + +func (s *Stmt) makeParam(val driver.Value) (res param, err error) { if val == nil { - res.ti.TypeId = typeNVarChar + res.ti.TypeId = typeNull res.buffer = nil - res.ti.Size = 2 + res.ti.Size = 0 return } switch val := val.(type) { @@ -372,19 +802,34 @@ func (s *MssqlStmt) makeParam(val driver.Value) (res Param, err error) { res.buffer = make([]byte, 8) res.ti.Size = 8 binary.LittleEndian.PutUint64(res.buffer, uint64(val)) + case sql.NullInt64: + // only null values should be getting here + res.ti.TypeId = typeIntN + res.ti.Size = 8 + res.buffer = []byte{} + case float64: res.ti.TypeId = typeFltN res.ti.Size = 8 res.buffer = make([]byte, 8) binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(val)) + case sql.NullFloat64: + // only null values should be getting here + res.ti.TypeId = typeFltN + res.ti.Size = 8 + res.buffer = []byte{} + case []byte: res.ti.TypeId = typeBigVarBin res.ti.Size = len(val) res.buffer = val case string: + res = makeStrParam(val) + case sql.NullString: + // only null values should be getting here res.ti.TypeId = typeNVarChar - res.buffer = str2ucs2(val) - res.ti.Size = len(res.buffer) + res.buffer = nil + res.ti.Size = 8000 case bool: res.ti.TypeId = typeBitN res.ti.Size = 1 @@ -392,55 +837,39 @@ func (s *MssqlStmt) makeParam(val driver.Value) (res Param, err error) { if val { res.buffer[0] = 1 } + case sql.NullBool: + // only null values should be getting here + res.ti.TypeId = typeBitN + res.ti.Size = 1 + res.buffer = []byte{} + case time.Time: if s.c.sess.loginAck.TDSVersion >= verTDS73 { res.ti.TypeId = typeDateTimeOffsetN res.ti.Scale = 7 - res.ti.Size = 10 - buf := make([]byte, 10) - res.buffer = buf - days, ns := dateTime2(val) - ns /= 100 - buf[0] = byte(ns) - buf[1] = byte(ns >> 8) - buf[2] = byte(ns >> 16) - buf[3] = byte(ns >> 24) - buf[4] = byte(ns >> 32) - buf[5] = byte(days) - buf[6] = byte(days >> 8) - buf[7] = byte(days >> 16) - _, offset := val.Zone() - offset /= 60 - buf[8] = byte(offset) - buf[9] = byte(offset >> 8) + res.buffer = encodeDateTimeOffset(val, int(res.ti.Scale)) + res.ti.Size = len(res.buffer) } else { res.ti.TypeId = typeDateTimeN - res.ti.Size = 8 - res.buffer = make([]byte, 8) - ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) - dur := val.Sub(ref) - days := dur / (24 * time.Hour) - tm := (300 * (dur % (24 * time.Hour))) / time.Second - binary.LittleEndian.PutUint32(res.buffer[0:4], uint32(days)) - binary.LittleEndian.PutUint32(res.buffer[4:8], uint32(tm)) + res.buffer = encodeDateTime(val) + res.ti.Size = len(res.buffer) } default: - err = fmt.Errorf("mssql: unknown type for %T", val) - return + return s.makeParamExtra(val) } return } -type MssqlResult struct { - c *MssqlConn +type Result struct { + c *Conn rowsAffected int64 } -func (r *MssqlResult) RowsAffected() (int64, error) { +func (r *Result) RowsAffected() (int64, error) { return r.rowsAffected, nil } -func (r *MssqlResult) LastInsertId() (int64, error) { +func (r *Result) LastInsertId() (int64, error) { s, err := r.c.Prepare("select cast(@@identity as bigint)") if err != nil { return 0, err @@ -462,3 +891,83 @@ func (r *MssqlResult) LastInsertId() (int64, error) { lastInsertId := dest[0].(int64) return lastInsertId, nil } + +var _ driver.Pinger = &Conn{} + +// Ping is 
used to check if the remote server is available and satisfies the Pinger interface. +func (c *Conn) Ping(ctx context.Context) error { + if !c.connectionGood { + return driver.ErrBadConn + } + stmt := &Stmt{c, `select 1;`, 0, nil} + _, err := stmt.ExecContext(ctx, nil) + return err +} + +var _ driver.ConnBeginTx = &Conn{} + +// BeginTx satisfies ConnBeginTx. +func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if !c.connectionGood { + return nil, driver.ErrBadConn + } + if opts.ReadOnly { + return nil, errors.New("Read-only transactions are not supported") + } + + var tdsIsolation isoLevel + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + tdsIsolation = isolationUseCurrent + case sql.LevelReadUncommitted: + tdsIsolation = isolationReadUncommited + case sql.LevelReadCommitted: + tdsIsolation = isolationReadCommited + case sql.LevelWriteCommitted: + return nil, errors.New("LevelWriteCommitted isolation level is not supported") + case sql.LevelRepeatableRead: + tdsIsolation = isolationRepeatableRead + case sql.LevelSnapshot: + tdsIsolation = isolationSnapshot + case sql.LevelSerializable: + tdsIsolation = isolationSerializable + case sql.LevelLinearizable: + return nil, errors.New("LevelLinearizable isolation level is not supported") + default: + return nil, errors.New("Isolation level is not supported or unknown") + } + return c.begin(ctx, tdsIsolation) +} + +func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if !c.connectionGood { + return nil, driver.ErrBadConn + } + if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") { + return c.prepareCopyIn(ctx, query) + } + + return c.prepareContext(ctx, query) +} + +func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + if !s.c.connectionGood { + return nil, driver.ErrBadConn + } + list := make([]namedValue, len(args)) + for i, nv := range args { + list[i] = namedValue(nv) + } + return s.queryContext(ctx, list) +} + +func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + if !s.c.connectionGood { + return nil, driver.ErrBadConn + } + list := make([]namedValue, len(args)) + for i, nv := range args { + list[i] = namedValue(nv) + } + return s.exec(ctx, list) +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go deleted file mode 100644 index b8cffe9c01fe4..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build go1.3 - -package mssql - -import ( - "net" -) - -func createDialer(p connectParams) *net.Dialer { - return &net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive} -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go deleted file mode 100644 index 3c7e72716d14c..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !go1.3 - -package mssql - -import ( - "net" -) - -func createDialer(p *connectParams) *net.Dialer { - return &net.Dialer{Timeout: p.dial_timeout} -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go new file mode 100644 index 0000000000000..833f047163778 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go @@ -0,0 
+1,47 @@ +// +build go1.10 + +package mssql + +import ( + "context" + "database/sql/driver" +) + +var _ driver.Connector = &Connector{} +var _ driver.SessionResetter = &Conn{} + +func (c *Conn) ResetSession(ctx context.Context) error { + if !c.connectionGood { + return driver.ErrBadConn + } + c.resetSession = true + + if c.connector == nil || len(c.connector.SessionInitSQL) == 0 { + return nil + } + + s, err := c.prepareContext(ctx, c.connector.SessionInitSQL) + if err != nil { + return driver.ErrBadConn + } + _, err = s.exec(ctx, nil) + if err != nil { + return driver.ErrBadConn + } + + return nil +} + +// Connect to the server and return a TDS connection. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + conn, err := c.driver.connect(ctx, c, c.params) + if err == nil { + err = conn.ResetSession(ctx) + } + return conn, err +} + +// Driver underlying the Connector. +func (c *Connector) Driver() driver.Driver { + return c.driver +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go new file mode 100644 index 0000000000000..65a11720da21a --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go @@ -0,0 +1,171 @@ +// +build go1.9 + +package mssql + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + "reflect" + "time" + + // "github.com/cockroachdb/apd" + "cloud.google.com/go/civil" +) + +// Type alias provided for compatibility. + +type MssqlDriver = Driver // Deprecated: users should transition to the new name when possible. +type MssqlBulk = Bulk // Deprecated: users should transition to the new name when possible. +type MssqlBulkOptions = BulkOptions // Deprecated: users should transition to the new name when possible. +type MssqlConn = Conn // Deprecated: users should transition to the new name when possible. +type MssqlResult = Result // Deprecated: users should transition to the new name when possible. +type MssqlRows = Rows // Deprecated: users should transition to the new name when possible. +type MssqlStmt = Stmt // Deprecated: users should transition to the new name when possible. + +var _ driver.NamedValueChecker = &Conn{} + +// VarChar parameter types. +type VarChar string + +type NVarCharMax string +type VarCharMax string + +// DateTime1 encodes parameters to original DateTime SQL types. +type DateTime1 time.Time + +// DateTimeOffset encodes parameters to DateTimeOffset, preserving the UTC offset. 
+type DateTimeOffset time.Time + +func convertInputParameter(val interface{}) (interface{}, error) { + switch v := val.(type) { + case VarChar: + return val, nil + case NVarCharMax: + return val, nil + case VarCharMax: + return val, nil + case DateTime1: + return val, nil + case DateTimeOffset: + return val, nil + case civil.Date: + return val, nil + case civil.DateTime: + return val, nil + case civil.Time: + return val, nil + // case *apd.Decimal: + // return nil + default: + return driver.DefaultParameterConverter.ConvertValue(v) + } +} + +func (c *Conn) CheckNamedValue(nv *driver.NamedValue) error { + switch v := nv.Value.(type) { + case sql.Out: + if c.outs == nil { + c.outs = make(map[string]interface{}) + } + c.outs[nv.Name] = v.Dest + + if v.Dest == nil { + return errors.New("destination is a nil pointer") + } + + dest_info := reflect.ValueOf(v.Dest) + if dest_info.Kind() != reflect.Ptr { + return errors.New("destination not a pointer") + } + + if dest_info.IsNil() { + return errors.New("destination is a nil pointer") + } + + pointed_value := reflect.Indirect(dest_info) + + // don't allow pointer to a pointer, only pointer to a value can be handled + // correctly + if pointed_value.Kind() == reflect.Ptr { + return errors.New("destination is a pointer to a pointer") + } + + // Unwrap the Out value and check the inner value. + val := pointed_value.Interface() + if val == nil { + return errors.New("MSSQL does not allow NULL value without type for OUTPUT parameters") + } + conv, err := convertInputParameter(val) + if err != nil { + return err + } + if conv == nil { + // if we replace with nil we would lose type information + nv.Value = sql.Out{Dest: val} + } else { + nv.Value = sql.Out{Dest: conv} + } + return nil + case *ReturnStatus: + *v = 0 // By default the return value should be zero. 
+ c.returnStatus = v + return driver.ErrRemoveArgument + default: + var err error + nv.Value, err = convertInputParameter(nv.Value) + return err + } +} + +func (s *Stmt) makeParamExtra(val driver.Value) (res param, err error) { + switch val := val.(type) { + case VarChar: + res.ti.TypeId = typeBigVarChar + res.buffer = []byte(val) + res.ti.Size = len(res.buffer) + case VarCharMax: + res.ti.TypeId = typeBigVarChar + res.buffer = []byte(val) + res.ti.Size = 0 // currently zero forces varchar(max) + case NVarCharMax: + res.ti.TypeId = typeNVarChar + res.buffer = str2ucs2(string(val)) + res.ti.Size = 0 // currently zero forces nvarchar(max) + case DateTime1: + t := time.Time(val) + res.ti.TypeId = typeDateTimeN + res.buffer = encodeDateTime(t) + res.ti.Size = len(res.buffer) + case DateTimeOffset: + res.ti.TypeId = typeDateTimeOffsetN + res.ti.Scale = 7 + res.buffer = encodeDateTimeOffset(time.Time(val), int(res.ti.Scale)) + res.ti.Size = len(res.buffer) + case civil.Date: + res.ti.TypeId = typeDateN + res.buffer = encodeDate(val.In(time.UTC)) + res.ti.Size = len(res.buffer) + case civil.DateTime: + res.ti.TypeId = typeDateTime2N + res.ti.Scale = 7 + res.buffer = encodeDateTime2(val.In(time.UTC), int(res.ti.Scale)) + res.ti.Size = len(res.buffer) + case civil.Time: + res.ti.TypeId = typeTimeN + res.ti.Scale = 7 + res.buffer = encodeTime(val.Hour, val.Minute, val.Second, val.Nanosecond, int(res.ti.Scale)) + res.ti.Size = len(res.buffer) + case sql.Out: + res, err = s.makeParam(val.Dest) + res.Flags = fByRevValue + default: + err = fmt.Errorf("mssql: unknown type for %T", val) + } + return +} + +func scanIntoOut(name string, fromServer, scanInto interface{}) error { + return convertAssign(scanInto, fromServer) +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go new file mode 100644 index 0000000000000..9680f5107e0d2 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go @@ -0,0 +1,16 @@ +// +build !go1.9 + +package mssql + +import ( + "database/sql/driver" + "fmt" +) + +func (s *Stmt) makeParamExtra(val driver.Value) (param, error) { + return param{}, fmt.Errorf("mssql: unknown type for %T", val) +} + +func scanIntoOut(name string, fromServer, scanInto interface{}) error { + return fmt.Errorf("mssql: unsupported OUTPUT type, use a newer Go version") +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/net.go b/vendor/github.com/denisenkom/go-mssqldb/net.go index 72a87340db5fd..e3864d1a22245 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/net.go +++ b/vendor/github.com/denisenkom/go-mssqldb/net.go @@ -14,7 +14,7 @@ type timeoutConn struct { continueRead bool } -func NewTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn { +func newTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn { return &timeoutConn{ c: conn, timeout: timeout, @@ -33,7 +33,7 @@ func (c *timeoutConn) Read(b []byte) (n int, err error) { c.continueRead = false } if !c.continueRead { - var packet uint8 + var packet packetType packet, err = c.buf.BeginRead() if err != nil { err = fmt.Errorf("Cannot read handshake packet: %s", err.Error()) @@ -48,9 +48,11 @@ func (c *timeoutConn) Read(b []byte) (n int, err error) { n, err = c.buf.Read(b) return } - err = c.c.SetDeadline(time.Now().Add(c.timeout)) - if err != nil { - return + if c.timeout > 0 { + err = c.c.SetDeadline(time.Now().Add(c.timeout)) + if err != nil { + return + } } return c.c.Read(b) } @@ -58,7 +60,7 @@ 
func (c *timeoutConn) Read(b []byte) (n int, err error) { func (c *timeoutConn) Write(b []byte) (n int, err error) { if c.buf != nil { if !c.packetPending { - c.buf.BeginPacket(packPrelogin) + c.buf.BeginPacket(packPrelogin, false) c.packetPending = true } n, err = c.buf.Write(b) @@ -67,9 +69,11 @@ func (c *timeoutConn) Write(b []byte) (n int, err error) { } return } - err = c.c.SetDeadline(time.Now().Add(c.timeout)) - if err != nil { - return + if c.timeout > 0 { + err = c.c.SetDeadline(time.Now().Add(c.timeout)) + if err != nil { + return + } } return c.c.Write(b) } diff --git a/vendor/github.com/denisenkom/go-mssqldb/ntlm.go b/vendor/github.com/denisenkom/go-mssqldb/ntlm.go index f853435c6ebdc..7c0cc4f785c39 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/ntlm.go +++ b/vendor/github.com/denisenkom/go-mssqldb/ntlm.go @@ -15,56 +15,56 @@ import ( ) const ( - NEGOTIATE_MESSAGE = 1 - CHALLENGE_MESSAGE = 2 - AUTHENTICATE_MESSAGE = 3 + _NEGOTIATE_MESSAGE = 1 + _CHALLENGE_MESSAGE = 2 + _AUTHENTICATE_MESSAGE = 3 ) const ( - NEGOTIATE_UNICODE = 0x00000001 - NEGOTIATE_OEM = 0x00000002 - NEGOTIATE_TARGET = 0x00000004 - NEGOTIATE_SIGN = 0x00000010 - NEGOTIATE_SEAL = 0x00000020 - NEGOTIATE_DATAGRAM = 0x00000040 - NEGOTIATE_LMKEY = 0x00000080 - NEGOTIATE_NTLM = 0x00000200 - NEGOTIATE_ANONYMOUS = 0x00000800 - NEGOTIATE_OEM_DOMAIN_SUPPLIED = 0x00001000 - NEGOTIATE_OEM_WORKSTATION_SUPPLIED = 0x00002000 - NEGOTIATE_ALWAYS_SIGN = 0x00008000 - NEGOTIATE_TARGET_TYPE_DOMAIN = 0x00010000 - NEGOTIATE_TARGET_TYPE_SERVER = 0x00020000 - NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000 - NEGOTIATE_IDENTIFY = 0x00100000 - REQUEST_NON_NT_SESSION_KEY = 0x00400000 - NEGOTIATE_TARGET_INFO = 0x00800000 - NEGOTIATE_VERSION = 0x02000000 - NEGOTIATE_128 = 0x20000000 - NEGOTIATE_KEY_EXCH = 0x40000000 - NEGOTIATE_56 = 0x80000000 + _NEGOTIATE_UNICODE = 0x00000001 + _NEGOTIATE_OEM = 0x00000002 + _NEGOTIATE_TARGET = 0x00000004 + _NEGOTIATE_SIGN = 0x00000010 + _NEGOTIATE_SEAL = 0x00000020 + _NEGOTIATE_DATAGRAM = 0x00000040 + _NEGOTIATE_LMKEY = 0x00000080 + _NEGOTIATE_NTLM = 0x00000200 + _NEGOTIATE_ANONYMOUS = 0x00000800 + _NEGOTIATE_OEM_DOMAIN_SUPPLIED = 0x00001000 + _NEGOTIATE_OEM_WORKSTATION_SUPPLIED = 0x00002000 + _NEGOTIATE_ALWAYS_SIGN = 0x00008000 + _NEGOTIATE_TARGET_TYPE_DOMAIN = 0x00010000 + _NEGOTIATE_TARGET_TYPE_SERVER = 0x00020000 + _NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000 + _NEGOTIATE_IDENTIFY = 0x00100000 + _REQUEST_NON_NT_SESSION_KEY = 0x00400000 + _NEGOTIATE_TARGET_INFO = 0x00800000 + _NEGOTIATE_VERSION = 0x02000000 + _NEGOTIATE_128 = 0x20000000 + _NEGOTIATE_KEY_EXCH = 0x40000000 + _NEGOTIATE_56 = 0x80000000 ) -const NEGOTIATE_FLAGS = NEGOTIATE_UNICODE | - NEGOTIATE_NTLM | - NEGOTIATE_OEM_DOMAIN_SUPPLIED | - NEGOTIATE_OEM_WORKSTATION_SUPPLIED | - NEGOTIATE_ALWAYS_SIGN | - NEGOTIATE_EXTENDED_SESSIONSECURITY +const _NEGOTIATE_FLAGS = _NEGOTIATE_UNICODE | + _NEGOTIATE_NTLM | + _NEGOTIATE_OEM_DOMAIN_SUPPLIED | + _NEGOTIATE_OEM_WORKSTATION_SUPPLIED | + _NEGOTIATE_ALWAYS_SIGN | + _NEGOTIATE_EXTENDED_SESSIONSECURITY -type NTLMAuth struct { +type ntlmAuth struct { Domain string UserName string Password string Workstation string } -func getAuth(user, password, service, workstation string) (Auth, bool) { +func getAuth(user, password, service, workstation string) (auth, bool) { if !strings.ContainsRune(user, '\\') { return nil, false } domain_user := strings.SplitN(user, "\\", 2) - return &NTLMAuth{ + return &ntlmAuth{ Domain: domain_user[0], UserName: domain_user[1], Password: password, @@ 
-86,13 +86,13 @@ func utf16le(val string) []byte { return v } -func (auth *NTLMAuth) InitialBytes() ([]byte, error) { +func (auth *ntlmAuth) InitialBytes() ([]byte, error) { domain_len := len(auth.Domain) workstation_len := len(auth.Workstation) msg := make([]byte, 40+domain_len+workstation_len) copy(msg, []byte("NTLMSSP\x00")) - binary.LittleEndian.PutUint32(msg[8:], NEGOTIATE_MESSAGE) - binary.LittleEndian.PutUint32(msg[12:], NEGOTIATE_FLAGS) + binary.LittleEndian.PutUint32(msg[8:], _NEGOTIATE_MESSAGE) + binary.LittleEndian.PutUint32(msg[12:], _NEGOTIATE_FLAGS) // Domain Name Fields binary.LittleEndian.PutUint16(msg[16:], uint16(domain_len)) binary.LittleEndian.PutUint16(msg[18:], uint16(domain_len)) @@ -198,11 +198,11 @@ func ntlmSessionResponse(clientNonce [8]byte, serverChallenge [8]byte, password return response(hash, passwordHash) } -func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) { +func (auth *ntlmAuth) NextBytes(bytes []byte) ([]byte, error) { if string(bytes[0:8]) != "NTLMSSP\x00" { return nil, errorNTLM } - if binary.LittleEndian.Uint32(bytes[8:12]) != CHALLENGE_MESSAGE { + if binary.LittleEndian.Uint32(bytes[8:12]) != _CHALLENGE_MESSAGE { return nil, errorNTLM } flags := binary.LittleEndian.Uint32(bytes[20:24]) @@ -210,7 +210,7 @@ func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) { copy(challenge[:], bytes[24:32]) var lm, nt []byte - if (flags & NEGOTIATE_EXTENDED_SESSIONSECURITY) != 0 { + if (flags & _NEGOTIATE_EXTENDED_SESSIONSECURITY) != 0 { nonce := clientChallenge() var lm_bytes [24]byte copy(lm_bytes[:8], nonce[:]) @@ -235,7 +235,7 @@ func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) { msg := make([]byte, 88+lm_len+nt_len+domain_len+user_len+workstation_len) copy(msg, []byte("NTLMSSP\x00")) - binary.LittleEndian.PutUint32(msg[8:], AUTHENTICATE_MESSAGE) + binary.LittleEndian.PutUint32(msg[8:], _AUTHENTICATE_MESSAGE) // Lm Challenge Response Fields binary.LittleEndian.PutUint16(msg[12:], uint16(lm_len)) binary.LittleEndian.PutUint16(msg[14:], uint16(lm_len)) @@ -279,5 +279,5 @@ func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) { return msg, nil } -func (auth *NTLMAuth) Free() { +func (auth *ntlmAuth) Free() { } diff --git a/vendor/github.com/denisenkom/go-mssqldb/parser.go b/vendor/github.com/denisenkom/go-mssqldb/parser.go index 9e37c16a655c1..8021ca603c954 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/parser.go +++ b/vendor/github.com/denisenkom/go-mssqldb/parser.go @@ -11,6 +11,9 @@ type parser struct { w bytes.Buffer paramCount int paramMax int + + // using map as a set + namedParams map[string]bool } func (p *parser) next() (rune, bool) { @@ -39,13 +42,14 @@ type stateFunc func(*parser) stateFunc func parseParams(query string) (string, int) { p := &parser{ - r: bytes.NewReader([]byte(query)), + r: bytes.NewReader([]byte(query)), + namedParams: map[string]bool{}, } state := parseNormal for state != nil { state = state(p) } - return p.w.String(), p.paramMax + return p.w.String(), p.paramMax + len(p.namedParams) } func parseNormal(p *parser) stateFunc { @@ -55,7 +59,7 @@ func parseNormal(p *parser) stateFunc { return nil } if ch == '?' 
{ - return parseParameter + return parseOrdinalParameter } else if ch == '$' || ch == ':' { ch2, ok := p.next() if !ok { @@ -64,7 +68,9 @@ func parseNormal(p *parser) stateFunc { } p.unread() if ch2 >= '0' && ch2 <= '9' { - return parseParameter + return parseOrdinalParameter + } else if 'a' <= ch2 && ch2 <= 'z' || 'A' <= ch2 && ch2 <= 'Z' { + return parseNamedParameter } } p.write(ch) @@ -83,7 +89,7 @@ func parseNormal(p *parser) stateFunc { } } -func parseParameter(p *parser) stateFunc { +func parseOrdinalParameter(p *parser) stateFunc { var paramN int var ok bool for { @@ -113,6 +119,30 @@ func parseParameter(p *parser) stateFunc { return parseNormal } +func parseNamedParameter(p *parser) stateFunc { + var paramName string + var ok bool + for { + var ch rune + ch, ok = p.next() + if ok && (ch >= '0' && ch <= '9' || 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z') { + paramName = paramName + string(ch) + } else { + break + } + } + if ok { + p.unread() + } + p.namedParams[paramName] = true + p.w.WriteString("@") + p.w.WriteString(paramName) + if !ok { + return nil + } + return parseNormal +} + func parseQuote(p *parser) stateFunc { for { ch, ok := p.next() diff --git a/vendor/github.com/denisenkom/go-mssqldb/rpc.go b/vendor/github.com/denisenkom/go-mssqldb/rpc.go index 00b9b1e217b68..4ca22578fae19 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/rpc.go +++ b/vendor/github.com/denisenkom/go-mssqldb/rpc.go @@ -4,7 +4,7 @@ import ( "encoding/binary" ) -type ProcId struct { +type procId struct { id uint16 name string } @@ -15,24 +15,13 @@ const ( fDefaultValue = 2 ) -type Param struct { +type param struct { Name string Flags uint8 ti typeInfo buffer []byte } -func MakeProcId(name string) (res ProcId) { - res.name = name - if len(name) == 0 { - panic("Proc name shouln't be empty") - } - if len(name) >= 0xffff { - panic("Invalid length of procedure name, should be less than 0xffff") - } - return res -} - const ( fWithRecomp = 1 fNoMetaData = 2 @@ -40,25 +29,25 @@ const ( ) var ( - Sp_Cursor = ProcId{1, ""} - Sp_CursorOpen = ProcId{2, ""} - Sp_CursorPrepare = ProcId{3, ""} - Sp_CursorExecute = ProcId{4, ""} - Sp_CursorPrepExec = ProcId{5, ""} - Sp_CursorUnprepare = ProcId{6, ""} - Sp_CursorFetch = ProcId{7, ""} - Sp_CursorOption = ProcId{8, ""} - Sp_CursorClose = ProcId{9, ""} - Sp_ExecuteSql = ProcId{10, ""} - Sp_Prepare = ProcId{11, ""} - Sp_PrepExec = ProcId{13, ""} - Sp_PrepExecRpc = ProcId{14, ""} - Sp_Unprepare = ProcId{15, ""} + sp_Cursor = procId{1, ""} + sp_CursorOpen = procId{2, ""} + sp_CursorPrepare = procId{3, ""} + sp_CursorExecute = procId{4, ""} + sp_CursorPrepExec = procId{5, ""} + sp_CursorUnprepare = procId{6, ""} + sp_CursorFetch = procId{7, ""} + sp_CursorOption = procId{8, ""} + sp_CursorClose = procId{9, ""} + sp_ExecuteSql = procId{10, ""} + sp_Prepare = procId{11, ""} + sp_PrepExec = procId{13, ""} + sp_PrepExecRpc = procId{14, ""} + sp_Unprepare = procId{15, ""} ) // http://msdn.microsoft.com/en-us/library/dd357576.aspx -func sendRpc(buf *tdsBuffer, headers []headerStruct, proc ProcId, flags uint16, params []Param) (err error) { - buf.BeginPacket(packRPCRequest) +func sendRpc(buf *tdsBuffer, headers []headerStruct, proc procId, flags uint16, params []param, resetSession bool) (err error) { + buf.BeginPacket(packRPCRequest, resetSession) writeAllHeaders(buf, headers) if len(proc.name) == 0 { var idswitch uint16 = 0xffff diff --git a/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go 
b/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go index a6e95051c9fde..9b5bc6893f061 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go +++ b/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go @@ -113,7 +113,7 @@ type SSPIAuth struct { ctxt SecHandle } -func getAuth(user, password, service, workstation string) (Auth, bool) { +func getAuth(user, password, service, workstation string) (auth, bool) { if user == "" { return &SSPIAuth{Service: service}, true } diff --git a/vendor/github.com/denisenkom/go-mssqldb/tds.go b/vendor/github.com/denisenkom/go-mssqldb/tds.go index fd42dba34a26a..16d9ca826453b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/tds.go +++ b/vendor/github.com/denisenkom/go-mssqldb/tds.go @@ -1,6 +1,7 @@ package mssql import ( + "context" "crypto/tls" "crypto/x509" "encoding/binary" @@ -9,11 +10,13 @@ import ( "io" "io/ioutil" "net" + "net/url" "os" "sort" "strconv" "strings" "time" + "unicode" "unicode/utf16" "unicode/utf8" ) @@ -47,13 +50,16 @@ func parseInstances(msg []byte) map[string]map[string]string { return results } -func getInstances(address string) (map[string]map[string]string, error) { - conn, err := net.DialTimeout("udp", address+":1434", 5*time.Second) +func getInstances(ctx context.Context, d Dialer, address string) (map[string]map[string]string, error) { + maxTime := 5 * time.Second + ctx, cancel := context.WithTimeout(ctx, maxTime) + defer cancel() + conn, err := d.DialContext(ctx, "udp", address+":1434") if err != nil { return nil, err } defer conn.Close() - conn.SetDeadline(time.Now().Add(5 * time.Second)) + conn.SetDeadline(time.Now().Add(maxTime)) _, err = conn.Write([]byte{3}) if err != nil { return nil, err @@ -79,11 +85,16 @@ const ( ) // packet types +// https://msdn.microsoft.com/en-us/library/dd304214.aspx const ( - packSQLBatch = 1 - packRPCRequest = 3 - packReply = 4 - packCancel = 6 + packSQLBatch packetType = 1 + packRPCRequest = 3 + packReply = 4 + + // 2.2.1.7 Attention: https://msdn.microsoft.com/en-us/library/dd341449.aspx + // 4.19.2 Out-of-Band Attention Signal: https://msdn.microsoft.com/en-us/library/dd305167.aspx + packAttention = 6 + packBulkLoadBCP = 7 packTransMgrReq = 14 packNormal = 15 @@ -119,7 +130,7 @@ type tdsSession struct { columns []columnStruct tranid uint64 logFlags uint64 - log *Logger + log optionalLogger routedServer string routedPort uint16 } @@ -131,6 +142,7 @@ const ( logSQL = 8 logParams = 16 logTransaction = 32 + logDebug = 64 ) type columnStruct struct { @@ -140,19 +152,19 @@ type columnStruct struct { ti typeInfo } -type KeySlice []uint8 +type keySlice []uint8 -func (p KeySlice) Len() int { return len(p) } -func (p KeySlice) Less(i, j int) bool { return p[i] < p[j] } -func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p keySlice) Len() int { return len(p) } +func (p keySlice) Less(i, j int) bool { return p[i] < p[j] } +func (p keySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // http://msdn.microsoft.com/en-us/library/dd357559.aspx func writePrelogin(w *tdsBuffer, fields map[uint8][]byte) error { var err error - w.BeginPacket(packPrelogin) + w.BeginPacket(packPrelogin, false) offset := uint16(5*len(fields) + 1) - keys := make(KeySlice, 0, len(fields)) + keys := make(keySlice, 0, len(fields)) for k, _ := range fields { keys = append(keys, k) } @@ -340,7 +352,7 @@ func manglePassword(password string) []byte { // http://msdn.microsoft.com/en-us/library/dd304019.aspx func sendLogin(w *tdsBuffer, login login) error { - 
w.BeginPacket(packLogin7) + w.BeginPacket(packLogin7, false) hostname := str2ucs2(login.HostName) username := str2ucs2(login.UserName) password := manglePassword(login.Password) @@ -490,6 +502,11 @@ func readBVarChar(r io.Reader) (res string, err error) { if err != nil { return "", err } + + // A zero length could be returned, return an empty string + if numchars == 0 { + return "", nil + } return readUcs2(r, int(numchars)) } @@ -588,7 +605,7 @@ func (hdr transDescrHdr) pack() (res []byte) { } func writeAllHeaders(w io.Writer, headers []headerStruct) (err error) { - // calculatint total length + // Calculating total length. var totallen uint32 = 4 for _, hdr := range headers { totallen += 4 + 2 + uint32(len(hdr.data)) @@ -616,10 +633,8 @@ func writeAllHeaders(w io.Writer, headers []headerStruct) (err error) { return nil } -func sendSqlBatch72(buf *tdsBuffer, - sqltext string, - headers []headerStruct) (err error) { - buf.BeginPacket(packSQLBatch) +func sendSqlBatch72(buf *tdsBuffer, sqltext string, headers []headerStruct, resetSession bool) (err error) { + buf.BeginPacket(packSQLBatch, resetSession) if err = writeAllHeaders(buf, headers); err != nil { return @@ -632,6 +647,13 @@ func sendSqlBatch72(buf *tdsBuffer, return buf.FinishPacket() } +// 2.2.1.7 Attention: https://msdn.microsoft.com/en-us/library/dd341449.aspx +// 4.19.2 Out-of-Band Attention Signal: https://msdn.microsoft.com/en-us/library/dd305167.aspx +func sendAttention(buf *tdsBuffer) error { + buf.BeginPacket(packAttention, false) + return buf.FinishPacket() +} + type connectParams struct { logFlags uint64 port uint64 @@ -654,6 +676,7 @@ type connectParams struct { typeFlags uint8 failOverPartner string failOverPort uint64 + packetSize uint16 } func splitConnectionString(dsn string) (res map[string]string) { @@ -677,19 +700,251 @@ func splitConnectionString(dsn string) (res map[string]string) { return res } +// Splits a URL in the ODBC format +func splitConnectionStringOdbc(dsn string) (map[string]string, error) { + res := map[string]string{} + + type parserState int + const ( + // Before the start of a key + parserStateBeforeKey parserState = iota + + // Inside a key + parserStateKey + + // Beginning of a value. May be bare or braced + parserStateBeginValue + + // Inside a bare value + parserStateBareValue + + // Inside a braced value + parserStateBracedValue + + // A closing brace inside a braced value. + // May be the end of the value or an escaped closing brace, depending on the next character + parserStateBracedValueClosingBrace + + // After a value. Next character should be a semicolon or whitespace. + parserStateEndValue + ) + + var state = parserStateBeforeKey + + var key string + var value string + + for i, c := range dsn { + switch state { + case parserStateBeforeKey: + switch { + case c == '=': + return res, fmt.Errorf("Unexpected character = at index %d. 
Expected start of key or semi-colon or whitespace.", i) + case !unicode.IsSpace(c) && c != ';': + state = parserStateKey + key += string(c) + } + + case parserStateKey: + switch c { + case '=': + key = normalizeOdbcKey(key) + if len(key) == 0 { + return res, fmt.Errorf("Unexpected end of key at index %d.", i) + } + + state = parserStateBeginValue + + case ';': + // Key without value + key = normalizeOdbcKey(key) + if len(key) == 0 { + return res, fmt.Errorf("Unexpected end of key at index %d.", i) + } + + res[key] = value + key = "" + value = "" + state = parserStateBeforeKey + + default: + key += string(c) + } + + case parserStateBeginValue: + switch { + case c == '{': + state = parserStateBracedValue + case c == ';': + // Empty value + res[key] = value + key = "" + state = parserStateBeforeKey + case unicode.IsSpace(c): + // Ignore whitespace + default: + state = parserStateBareValue + value += string(c) + } + + case parserStateBareValue: + if c == ';' { + res[key] = strings.TrimRightFunc(value, unicode.IsSpace) + key = "" + value = "" + state = parserStateBeforeKey + } else { + value += string(c) + } + + case parserStateBracedValue: + if c == '}' { + state = parserStateBracedValueClosingBrace + } else { + value += string(c) + } + + case parserStateBracedValueClosingBrace: + if c == '}' { + // Escaped closing brace + value += string(c) + state = parserStateBracedValue + continue + } + + // End of braced value + res[key] = value + key = "" + value = "" + + // This character is the first character past the end, + // so it needs to be parsed like the parserStateEndValue state. + state = parserStateEndValue + switch { + case c == ';': + state = parserStateBeforeKey + case unicode.IsSpace(c): + // Ignore whitespace + default: + return res, fmt.Errorf("Unexpected character %c at index %d. Expected semi-colon or whitespace.", c, i) + } + + case parserStateEndValue: + switch { + case c == ';': + state = parserStateBeforeKey + case unicode.IsSpace(c): + // Ignore whitespace + default: + return res, fmt.Errorf("Unexpected character %c at index %d. Expected semi-colon or whitespace.", c, i) + } + } + } + + switch state { + case parserStateBeforeKey: // Okay + case parserStateKey: // Unfinished key. Treat as key without value. 
+ key = normalizeOdbcKey(key) + if len(key) == 0 { + return res, fmt.Errorf("Unexpected end of key at index %d.", len(dsn)) + } + res[key] = value + case parserStateBeginValue: // Empty value + res[key] = value + case parserStateBareValue: + res[key] = strings.TrimRightFunc(value, unicode.IsSpace) + case parserStateBracedValue: + return res, fmt.Errorf("Unexpected end of braced value at index %d.", len(dsn)) + case parserStateBracedValueClosingBrace: // End of braced value + res[key] = value + case parserStateEndValue: // Okay + } + + return res, nil +} + +// Normalizes the given string as an ODBC-format key +func normalizeOdbcKey(s string) string { + return strings.ToLower(strings.TrimRightFunc(s, unicode.IsSpace)) +} + +// Splits a URL of the form sqlserver://username:password@host/instance?param1=value¶m2=value +func splitConnectionStringURL(dsn string) (map[string]string, error) { + res := map[string]string{} + + u, err := url.Parse(dsn) + if err != nil { + return res, err + } + + if u.Scheme != "sqlserver" { + return res, fmt.Errorf("scheme %s is not recognized", u.Scheme) + } + + if u.User != nil { + res["user id"] = u.User.Username() + p, exists := u.User.Password() + if exists { + res["password"] = p + } + } + + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + host = u.Host + } + + if len(u.Path) > 0 { + res["server"] = host + "\\" + u.Path[1:] + } else { + res["server"] = host + } + + if len(port) > 0 { + res["port"] = port + } + + query := u.Query() + for k, v := range query { + if len(v) > 1 { + return res, fmt.Errorf("key %s provided more than once", k) + } + res[strings.ToLower(k)] = v[0] + } + + return res, nil +} + func parseConnectParams(dsn string) (connectParams, error) { - params := splitConnectionString(dsn) var p connectParams + + var params map[string]string + if strings.HasPrefix(dsn, "odbc:") { + parameters, err := splitConnectionStringOdbc(dsn[len("odbc:"):]) + if err != nil { + return p, err + } + params = parameters + } else if strings.HasPrefix(dsn, "sqlserver://") { + parameters, err := splitConnectionStringURL(dsn) + if err != nil { + return p, err + } + params = parameters + } else { + params = splitConnectionString(dsn) + } + strlog, ok := params["log"] if ok { var err error - p.logFlags, err = strconv.ParseUint(strlog, 10, 0) + p.logFlags, err = strconv.ParseUint(strlog, 10, 64) if err != nil { return p, fmt.Errorf("Invalid log parameter '%s': %s", strlog, err.Error()) } } server := params["server"] - parts := strings.SplitN(server, "\\", 2) + parts := strings.SplitN(server, `\`, 2) p.host = parts[0] if p.host == "." 
|| strings.ToUpper(p.host) == "(LOCAL)" || p.host == "" { p.host = "localhost" @@ -705,36 +960,64 @@ func parseConnectParams(dsn string) (connectParams, error) { strport, ok := params["port"] if ok { var err error - p.port, err = strconv.ParseUint(strport, 0, 16) + p.port, err = strconv.ParseUint(strport, 10, 16) if err != nil { f := "Invalid tcp port '%v': %v" return p, fmt.Errorf(f, strport, err.Error()) } } - p.dial_timeout = 5 * time.Second - p.conn_timeout = 30 * time.Second - strconntimeout, ok := params["connection timeout"] + // https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-network-packet-size-server-configuration-option + // Default packet size remains at 4096 bytes + p.packetSize = 4096 + strpsize, ok := params["packet size"] if ok { - timeout, err := strconv.ParseUint(strconntimeout, 0, 16) + var err error + psize, err := strconv.ParseUint(strpsize, 0, 16) + if err != nil { + f := "Invalid packet size '%v': %v" + return p, fmt.Errorf(f, strpsize, err.Error()) + } + + // Ensure packet size falls within the TDS protocol range of 512 to 32767 bytes + // NOTE: Encrypted connections have a maximum size of 16383 bytes. If you request + // a higher packet size, the server will respond with an ENVCHANGE request to + // alter the packet size to 16383 bytes. + p.packetSize = uint16(psize) + if p.packetSize < 512 { + p.packetSize = 512 + } else if p.packetSize > 32767 { + p.packetSize = 32767 + } + } + + // https://msdn.microsoft.com/en-us/library/dd341108.aspx + // + // Do not set a connection timeout. Use Context to manage such things. + // Default to zero, but still allow it to be set. + if strconntimeout, ok := params["connection timeout"]; ok { + timeout, err := strconv.ParseUint(strconntimeout, 10, 64) if err != nil { f := "Invalid connection timeout '%v': %v" return p, fmt.Errorf(f, strconntimeout, err.Error()) } p.conn_timeout = time.Duration(timeout) * time.Second } - strdialtimeout, ok := params["dial timeout"] - if ok { - timeout, err := strconv.ParseUint(strdialtimeout, 0, 16) + p.dial_timeout = 15 * time.Second + if strdialtimeout, ok := params["dial timeout"]; ok { + timeout, err := strconv.ParseUint(strdialtimeout, 10, 64) if err != nil { f := "Invalid dial timeout '%v': %v" return p, fmt.Errorf(f, strdialtimeout, err.Error()) } p.dial_timeout = time.Duration(timeout) * time.Second } - keepAlive, ok := params["keepalive"] - if ok { - timeout, err := strconv.ParseUint(keepAlive, 0, 16) + + // default keep alive should be 30 seconds according to spec: + // https://msdn.microsoft.com/en-us/library/dd341108.aspx + p.keepAlive = 30 * time.Second + if keepAlive, ok := params["keepalive"]; ok { + timeout, err := strconv.ParseUint(keepAlive, 10, 64) if err != nil { f := "Invalid keepAlive value '%s': %s" return p, fmt.Errorf(f, keepAlive, err.Error()) @@ -743,7 +1026,7 @@ func parseConnectParams(dsn string) (connectParams, error) { } encrypt, ok := params["encrypt"] if ok { - if strings.ToUpper(encrypt) == "DISABLE" { + if strings.EqualFold(encrypt, "DISABLE") { p.disableEncryption = true } else { var err error @@ -819,7 +1102,7 @@ func parseConnectParams(dsn string) (connectParams, error) { return p, nil } -type Auth interface { +type auth interface { InitialBytes() ([]byte, error) NextBytes([]byte) ([]byte, error) Free() @@ -828,7 +1111,7 @@ type Auth interface { // SQL Server AlwaysOn Availability Group Listeners are bound by DNS to a // list of IP addresses. 
So if there is more than one, try them all and // use the first one that allows a connection. -func dialConnection(p connectParams) (conn net.Conn, err error) { +func dialConnection(ctx context.Context, c *Connector, p connectParams) (conn net.Conn, err error) { var ips []net.IP ips, err = net.LookupIP(p.host) if err != nil { @@ -839,9 +1122,9 @@ func dialConnection(p connectParams) (conn net.Conn, err error) { ips = []net.IP{ip} } if len(ips) == 1 { - d := createDialer(p) + d := c.getDialer(&p) addr := net.JoinHostPort(ips[0].String(), strconv.Itoa(int(p.port))) - conn, err = d.Dial("tcp", addr) + conn, err = d.DialContext(ctx, "tcp", addr) } else { //Try Dials in parallel to avoid waiting for timeouts. @@ -850,9 +1133,9 @@ func dialConnection(p connectParams) (conn net.Conn, err error) { portStr := strconv.Itoa(int(p.port)) for _, ip := range ips { go func(ip net.IP) { - d := createDialer(p) + d := c.getDialer(&p) addr := net.JoinHostPort(ip.String(), portStr) - conn, err := d.Dial("tcp", addr) + conn, err := d.DialContext(ctx, "tcp", addr) if err == nil { connChan <- conn } else { @@ -887,16 +1170,21 @@ func dialConnection(p connectParams) (conn net.Conn, err error) { f := "Unable to open tcp connection with host '%v:%v': %v" return nil, fmt.Errorf(f, p.host, p.port, err.Error()) } - return conn, err } -func connect(p connectParams) (res *tdsSession, err error) { - res = nil +func connect(ctx context.Context, c *Connector, log optionalLogger, p connectParams) (res *tdsSession, err error) { + dialCtx := ctx + if p.dial_timeout > 0 { + var cancel func() + dialCtx, cancel = context.WithTimeout(ctx, p.dial_timeout) + defer cancel() + } // if instance is specified use instance resolution service if p.instance != "" { p.instance = strings.ToUpper(p.instance) - instances, err := getInstances(p.host) + d := c.getDialer(&p) + instances, err := getInstances(dialCtx, d, p.host) if err != nil { f := "Unable to get instances from Sql Server Browser on host %v: %v" return nil, fmt.Errorf(f, p.host, err.Error()) @@ -914,16 +1202,17 @@ func connect(p connectParams) (res *tdsSession, err error) { } initiate_connection: - conn, err := dialConnection(p) + conn, err := dialConnection(dialCtx, c, p) if err != nil { return nil, err } - toconn := NewTimeoutConn(conn, p.conn_timeout) + toconn := newTimeoutConn(conn, p.conn_timeout) - outbuf := newTdsBuffer(4096, toconn) + outbuf := newTdsBuffer(p.packetSize, toconn) sess := tdsSession{ buf: outbuf, + log: log, logFlags: p.logFlags, } @@ -969,8 +1258,7 @@ initiate_connection: if p.certificate != "" { pem, err := ioutil.ReadFile(p.certificate) if err != nil { - f := "Cannot read certificate '%s': %s" - return nil, fmt.Errorf(f, p.certificate, err.Error()) + return nil, fmt.Errorf("Cannot read certificate %q: %v", p.certificate, err) } certs := x509.NewCertPool() certs.AppendCertsFromPEM(pem) @@ -980,15 +1268,20 @@ initiate_connection: config.InsecureSkipVerify = true } config.ServerName = p.hostInCertificate + // fix for https://github.com/denisenkom/go-mssqldb/issues/166 + // Go implementation of TLS payload size heuristic algorithm splits single TDS package to multiple TCP segments, + // while SQL Server seems to expect one TCP segment per encrypted TDS package. 
+ // Setting DynamicRecordSizingDisabled to true disables that algorithm and uses 16384 bytes per TLS package + config.DynamicRecordSizingDisabled = true outbuf.transport = conn toconn.buf = outbuf tlsConn := tls.Client(toconn, &config) err = tlsConn.Handshake() + toconn.buf = nil outbuf.transport = tlsConn if err != nil { - f := "TLS Handshake failed: %s" - return nil, fmt.Errorf(f, err.Error()) + return nil, fmt.Errorf("TLS Handshake failed: %v", err) } if encrypt == encryptOff { outbuf.afterFirst = func() { @@ -999,7 +1292,7 @@ initiate_connection: login := login{ TDSVersion: verTDS74, - PacketSize: uint32(len(outbuf.buf)), + PacketSize: uint32(outbuf.PackageSize()), Database: p.database, OptionFlags2: fODBC, // to get unlimited TEXTSIZE HostName: p.workstation, @@ -1028,7 +1321,7 @@ initiate_connection: var sspi_msg []byte continue_login: tokchan := make(chan tokenStruct, 5) - go processResponse(&sess, tokchan) + go processResponse(context.Background(), &sess, tokchan, nil) success := false for tok := range tokchan { switch token := tok.(type) { @@ -1042,10 +1335,14 @@ continue_login: sess.loginAck = token case error: return nil, fmt.Errorf("Login error: %s", token.Error()) + case doneStruct: + if token.isError() { + return nil, fmt.Errorf("Login error: %s", token.getError()) + } } } if sspi_msg != nil { - outbuf.BeginPacket(packSSPIMessage) + outbuf.BeginPacket(packSSPIMessage, false) _, err = outbuf.Write(sspi_msg) if err != nil { return nil, err diff --git a/vendor/github.com/denisenkom/go-mssqldb/token.go b/vendor/github.com/denisenkom/go-mssqldb/token.go index f20bd14cc9ed3..1acac8a5d2bca 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/token.go +++ b/vendor/github.com/denisenkom/go-mssqldb/token.go @@ -1,30 +1,40 @@ package mssql import ( + "context" "encoding/binary" + "errors" + "fmt" "io" + "net" "strconv" "strings" ) +//go:generate stringer -type token + +type token byte + // token ids const ( - tokenReturnStatus = 121 // 0x79 - tokenColMetadata = 129 // 0x81 - tokenOrder = 169 // 0xA9 - tokenError = 170 // 0xAA - tokenInfo = 171 // 0xAB - tokenLoginAck = 173 // 0xad - tokenRow = 209 // 0xd1 - tokenNbcRow = 210 // 0xd2 - tokenEnvChange = 227 // 0xE3 - tokenSSPI = 237 // 0xED - tokenDone = 253 // 0xFD - tokenDoneProc = 254 - tokenDoneInProc = 255 + tokenReturnStatus token = 121 // 0x79 + tokenColMetadata token = 129 // 0x81 + tokenOrder token = 169 // 0xA9 + tokenError token = 170 // 0xAA + tokenInfo token = 171 // 0xAB + tokenReturnValue token = 0xAC + tokenLoginAck token = 173 // 0xad + tokenRow token = 209 // 0xd1 + tokenNbcRow token = 210 // 0xd2 + tokenEnvChange token = 227 // 0xE3 + tokenSSPI token = 237 // 0xED + tokenDone token = 253 // 0xFD + tokenDoneProc token = 254 + tokenDoneInProc token = 255 ) // done flags +// https://msdn.microsoft.com/en-us/library/dd340421.aspx const ( doneFinal = 0 doneMore = 1 @@ -59,6 +69,13 @@ const ( envRouting = 20 ) +// COLMETADATA flags +// https://msdn.microsoft.com/en-us/library/dd357363.aspx +const ( + colFlagNullable = 1 + // TODO implement more flags +) + // interface for all tokens type tokenStruct interface{} @@ -70,6 +87,19 @@ type doneStruct struct { Status uint16 CurCmd uint16 RowCount uint64 + errors []Error +} + +func (d doneStruct) isError() bool { + return d.Status&doneError != 0 || len(d.errors) > 0 +} + +func (d doneStruct) getError() Error { + if len(d.errors) > 0 { + return d.errors[len(d.errors)-1] + } else { + return Error{Message: "Request failed but didn't provide reason"} + } } type 
doneInProcStruct doneStruct @@ -120,27 +150,23 @@ func processEnvChg(sess *tdsSession) { badStreamPanic(err) } case envTypLanguage: - //currently ignored - // old value - _, err = readBVarChar(r) - if err != nil { - badStreamPanic(err) - } + // currently ignored // new value - _, err = readBVarChar(r) - if err != nil { + if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - case envTypCharset: - //currently ignored // old value - _, err = readBVarChar(r) - if err != nil { + if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } + case envTypCharset: + // currently ignored // new value - _, err = readBVarChar(r) - if err != nil { + if _, err = readBVarChar(r); err != nil { + badStreamPanic(err) + } + // old value + if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envTypPacketSize: @@ -156,38 +182,55 @@ func processEnvChg(sess *tdsSession) { if err != nil { badStreamPanicf("Invalid Packet size value returned from server (%s): %s", packetsize, err.Error()) } - if len(sess.buf.buf) != packetsizei { - newbuf := make([]byte, packetsizei) - copy(newbuf, sess.buf.buf) - sess.buf.buf = newbuf - } + sess.buf.ResizeBuffer(packetsizei) case envSortId: // currently ignored - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // new value + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envSortFlags: // currently ignored - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // new value + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envSqlCollation: // currently ignored - // old value - if _, err = readBVarChar(r); err != nil { + var collationSize uint8 + err = binary.Read(r, binary.LittleEndian, &collationSize) + if err != nil { badStreamPanic(err) } - // new value + + // SQL Collation data should contain 5 bytes in length + if collationSize != 5 { + badStreamPanicf("Invalid SQL Collation size value returned from server: %d", collationSize) + } + + // 4 bytes, contains: LCID ColFlags Version + var info uint32 + err = binary.Read(r, binary.LittleEndian, &info) + if err != nil { + badStreamPanic(err) + } + + // 1 byte, contains: sortID + var sortID uint8 + err = binary.Read(r, binary.LittleEndian, &sortID) + if err != nil { + badStreamPanic(err) + } + + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } @@ -226,21 +269,21 @@ func processEnvChg(sess *tdsSession) { sess.tranid = 0 case envEnlistDTC: // currently ignored - // old value + // new value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // new value, should be 0 + // old value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envDefectTran: // currently ignored - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // new value + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } @@ -342,11 +385,9 @@ func processEnvChg(sess *tdsSession) { } } -type returnStatus int32 - // http://msdn.microsoft.com/en-us/library/dd358180.aspx -func parseReturnStatus(r *tdsBuffer) returnStatus { - return returnStatus(r.int32()) +func parseReturnStatus(r *tdsBuffer) ReturnStatus { + return ReturnStatus(r.int32()) } func parseOrder(r *tdsBuffer) (res orderStruct) { @@ -358,6 +399,7 @@ func parseOrder(r *tdsBuffer) (res orderStruct) { return 
res } +// https://msdn.microsoft.com/en-us/library/dd340421.aspx func parseDone(r *tdsBuffer) (res doneStruct) { res.Status = r.uint16() res.CurCmd = r.uint16() @@ -365,6 +407,7 @@ func parseDone(r *tdsBuffer) (res doneStruct) { return res } +// https://msdn.microsoft.com/en-us/library/dd340553.aspx func parseDoneInProc(r *tdsBuffer) (res doneInProcStruct) { res.Status = r.uint16() res.CurCmd = r.uint16() @@ -473,26 +516,57 @@ func parseInfo(r *tdsBuffer) (res Error) { return } -func processResponse(sess *tdsSession, ch chan tokenStruct) { +// https://msdn.microsoft.com/en-us/library/dd303881.aspx +func parseReturnValue(r *tdsBuffer) (nv namedValue) { + /* + ParamOrdinal + ParamName + Status + UserType + Flags + TypeInfo + CryptoMetadata + Value + */ + r.uint16() + nv.Name = r.BVarChar() + r.byte() + r.uint32() // UserType (uint16 prior to 7.2) + r.uint16() + ti := readTypeInfo(r) + nv.Value = ti.Reader(&ti, r) + return +} + +func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[string]interface{}) { defer func() { if err := recover(); err != nil { + if sess.logFlags&logErrors != 0 { + sess.log.Printf("ERROR: Intercepted panic %v", err) + } ch <- err } close(ch) }() + packet_type, err := sess.buf.BeginRead() if err != nil { + if sess.logFlags&logErrors != 0 { + sess.log.Printf("ERROR: BeginRead failed %v", err) + } ch <- err return } if packet_type != packReply { - badStreamPanicf("invalid response packet type, expected REPLY, actual: %d", packet_type) + badStreamPanic(fmt.Errorf("unexpected packet type in reply: got %v, expected %v", packet_type, packReply)) } var columns []columnStruct - var lastError Error - var failed bool + errs := make([]Error, 0, 5) for { - token := sess.buf.byte() + token := token(sess.buf.byte()) + if sess.logFlags&logDebug != 0 { + sess.log.Printf("got token %v", token) + } switch token { case tokenSSPI: ch <- parseSSPIMsg(sess.buf) @@ -514,18 +588,17 @@ func processResponse(sess *tdsSession, ch chan tokenStruct) { ch <- done case tokenDone, tokenDoneProc: done := parseDone(sess.buf) - if sess.logFlags&logRows != 0 && done.Status&doneCount != 0 { - sess.log.Printf("(%d row(s) affected)\n", done.RowCount) - } - if done.Status&doneError != 0 || failed { - ch <- lastError - return + done.errors = errs + if sess.logFlags&logDebug != 0 { + sess.log.Printf("got DONE or DONEPROC status=%d", done.Status) } if done.Status&doneSrvError != 0 { - lastError.Message = "Server Error" - ch <- lastError + ch <- errors.New("SQL Server had internal error") return } + if sess.logFlags&logRows != 0 && done.Status&doneCount != 0 { + sess.log.Printf("(%d row(s) affected)\n", done.RowCount) + } ch <- done if done.Status&doneMore == 0 { return @@ -544,18 +617,188 @@ func processResponse(sess *tdsSession, ch chan tokenStruct) { case tokenEnvChange: processEnvChg(sess) case tokenError: - lastError = parseError72(sess.buf) - failed = true + err := parseError72(sess.buf) + if sess.logFlags&logDebug != 0 { + sess.log.Printf("got ERROR %d %s", err.Number, err.Message) + } + errs = append(errs, err) if sess.logFlags&logErrors != 0 { - sess.log.Println(lastError.Message) + sess.log.Println(err.Message) } case tokenInfo: info := parseInfo(sess.buf) + if sess.logFlags&logDebug != 0 { + sess.log.Printf("got INFO %d %s", info.Number, info.Message) + } if sess.logFlags&logMessages != 0 { sess.log.Println(info.Message) } + case tokenReturnValue: + nv := parseReturnValue(sess.buf) + if len(nv.Name) > 0 { + name := nv.Name[1:] // Remove the leading "@". 
+ if ov, has := outs[name]; has { + err = scanIntoOut(name, nv.Value, ov) + if err != nil { + fmt.Println("scan error", err) + ch <- err + } + } + } default: - badStreamPanicf("Unknown token type: %d", token) + badStreamPanic(fmt.Errorf("unknown token type returned: %v", token)) + } + } +} + +type parseRespIter byte + +const ( + parseRespIterContinue parseRespIter = iota // Continue parsing current token. + parseRespIterNext // Fetch the next token. + parseRespIterDone // Done with parsing the response. +) + +type parseRespState byte + +const ( + parseRespStateNormal parseRespState = iota // Normal response state. + parseRespStateCancel // Query is canceled, wait for server to confirm. + parseRespStateClosing // Waiting for tokens to come through. +) + +type parseResp struct { + sess *tdsSession + ctxDone <-chan struct{} + state parseRespState + cancelError error +} + +func (ts *parseResp) sendAttention(ch chan tokenStruct) parseRespIter { + if err := sendAttention(ts.sess.buf); err != nil { + ts.dlogf("failed to send attention signal %v", err) + ch <- err + return parseRespIterDone + } + ts.state = parseRespStateCancel + return parseRespIterContinue +} + +func (ts *parseResp) dlog(msg string) { + if ts.sess.logFlags&logDebug != 0 { + ts.sess.log.Println(msg) + } +} +func (ts *parseResp) dlogf(f string, v ...interface{}) { + if ts.sess.logFlags&logDebug != 0 { + ts.sess.log.Printf(f, v...) + } +} + +func (ts *parseResp) iter(ctx context.Context, ch chan tokenStruct, tokChan chan tokenStruct) parseRespIter { + switch ts.state { + default: + panic("unknown state") + case parseRespStateNormal: + select { + case tok, ok := <-tokChan: + if !ok { + ts.dlog("response finished") + return parseRespIterDone + } + if err, ok := tok.(net.Error); ok && err.Timeout() { + ts.cancelError = err + ts.dlog("got timeout error, sending attention signal to server") + return ts.sendAttention(ch) + } + // Pass the token along. + ch <- tok + return parseRespIterContinue + + case <-ts.ctxDone: + ts.ctxDone = nil + ts.dlog("got cancel message, sending attention signal to server") + return ts.sendAttention(ch) + } + case parseRespStateCancel: // Read all responses until a DONE or error is received.Auth + select { + case tok, ok := <-tokChan: + if !ok { + ts.dlog("response finished but waiting for attention ack") + return parseRespIterNext + } + switch tok := tok.(type) { + default: + // Ignore all other tokens while waiting. + // The TDS spec says other tokens may arrive after an attention + // signal is sent. Ignore these tokens and continue looking for + // a DONE with attention confirm mark. + case doneStruct: + if tok.Status&doneAttn != 0 { + ts.dlog("got cancellation confirmation from server") + if ts.cancelError != nil { + ch <- ts.cancelError + ts.cancelError = nil + } else { + ch <- ctx.Err() + } + return parseRespIterDone + } + + // If an error happens during cancel, pass it along and just stop. + // We are uncertain to receive more tokens. + case error: + ch <- tok + ts.state = parseRespStateClosing + } + return parseRespIterContinue + case <-ts.ctxDone: + ts.ctxDone = nil + ts.state = parseRespStateClosing + return parseRespIterContinue + } + case parseRespStateClosing: // Wait for current token chan to close. 
+ if _, ok := <-tokChan; !ok { + ts.dlog("response finished") + return parseRespIterDone + } + return parseRespIterContinue + } +} + +func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct, outs map[string]interface{}) { + ts := &parseResp{ + sess: sess, + ctxDone: ctx.Done(), + } + defer func() { + // Ensure any remaining error is piped through + // or the query may look like it executed when it actually failed. + if ts.cancelError != nil { + ch <- ts.cancelError + ts.cancelError = nil + } + close(ch) + }() + + // Loop over multiple responses. + for { + ts.dlog("initiating response reading") + + tokChan := make(chan tokenStruct) + go processSingleResponse(sess, tokChan, outs) + + // Loop over multiple tokens in response. + tokensLoop: + for { + switch ts.iter(ctx, ch, tokChan) { + case parseRespIterContinue: + // Nothing, continue to next token. + case parseRespIterNext: + break tokensLoop + case parseRespIterDone: + return + } } } } diff --git a/vendor/github.com/denisenkom/go-mssqldb/token_string.go b/vendor/github.com/denisenkom/go-mssqldb/token_string.go new file mode 100644 index 0000000000000..c075b23be0138 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/token_string.go @@ -0,0 +1,53 @@ +// Code generated by "stringer -type token"; DO NOT EDIT + +package mssql + +import "fmt" + +const ( + _token_name_0 = "tokenReturnStatus" + _token_name_1 = "tokenColMetadata" + _token_name_2 = "tokenOrdertokenErrortokenInfo" + _token_name_3 = "tokenLoginAck" + _token_name_4 = "tokenRowtokenNbcRow" + _token_name_5 = "tokenEnvChange" + _token_name_6 = "tokenSSPI" + _token_name_7 = "tokenDonetokenDoneProctokenDoneInProc" +) + +var ( + _token_index_0 = [...]uint8{0, 17} + _token_index_1 = [...]uint8{0, 16} + _token_index_2 = [...]uint8{0, 10, 20, 29} + _token_index_3 = [...]uint8{0, 13} + _token_index_4 = [...]uint8{0, 8, 19} + _token_index_5 = [...]uint8{0, 14} + _token_index_6 = [...]uint8{0, 9} + _token_index_7 = [...]uint8{0, 9, 22, 37} +) + +func (i token) String() string { + switch { + case i == 121: + return _token_name_0 + case i == 129: + return _token_name_1 + case 169 <= i && i <= 171: + i -= 169 + return _token_name_2[_token_index_2[i]:_token_index_2[i+1]] + case i == 173: + return _token_name_3 + case 209 <= i && i <= 210: + i -= 209 + return _token_name_4[_token_index_4[i]:_token_index_4[i+1]] + case i == 227: + return _token_name_5 + case i == 237: + return _token_name_6 + case 253 <= i && i <= 255: + i -= 253 + return _token_name_7[_token_index_7[i]:_token_index_7[i+1]] + default: + return fmt.Sprintf("token(%d)", i) + } +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/tran.go b/vendor/github.com/denisenkom/go-mssqldb/tran.go index ae38107661139..cb6436816f97e 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/tran.go +++ b/vendor/github.com/denisenkom/go-mssqldb/tran.go @@ -1,6 +1,7 @@ +package mssql + // Transaction Manager requests // http://msdn.microsoft.com/en-us/library/dd339887.aspx -package mssql import ( "encoding/binary" @@ -16,9 +17,19 @@ const ( tmSaveXact = 9 ) -func sendBeginXact(buf *tdsBuffer, headers []headerStruct, isolation uint8, - name string) (err error) { - buf.BeginPacket(packTransMgrReq) +type isoLevel uint8 + +const ( + isolationUseCurrent isoLevel = 0 + isolationReadUncommited = 1 + isolationReadCommited = 2 + isolationRepeatableRead = 3 + isolationSerializable = 4 + isolationSnapshot = 5 +) + +func sendBeginXact(buf *tdsBuffer, headers []headerStruct, isolation isoLevel, name 
string, resetSession bool) (err error) { + buf.BeginPacket(packTransMgrReq, resetSession) writeAllHeaders(buf, headers) var rqtype uint16 = tmBeginXact err = binary.Write(buf, binary.LittleEndian, &rqtype) @@ -40,8 +51,8 @@ const ( fBeginXact = 1 ) -func sendCommitXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string) error { - buf.BeginPacket(packTransMgrReq) +func sendCommitXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string, resetSession bool) error { + buf.BeginPacket(packTransMgrReq, resetSession) writeAllHeaders(buf, headers) var rqtype uint16 = tmCommitXact err := binary.Write(buf, binary.LittleEndian, &rqtype) @@ -69,8 +80,8 @@ func sendCommitXact(buf *tdsBuffer, headers []headerStruct, name string, flags u return buf.FinishPacket() } -func sendRollbackXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string) error { - buf.BeginPacket(packTransMgrReq) +func sendRollbackXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string, resetSession bool) error { + buf.BeginPacket(packTransMgrReq, resetSession) writeAllHeaders(buf, headers) var rqtype uint16 = tmRollbackXact err := binary.Write(buf, binary.LittleEndian, &rqtype) diff --git a/vendor/github.com/denisenkom/go-mssqldb/types.go b/vendor/github.com/denisenkom/go-mssqldb/types.go index c38862e9eb767..3bad788b92392 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/types.go +++ b/vendor/github.com/denisenkom/go-mssqldb/types.go @@ -6,8 +6,11 @@ import ( "fmt" "io" "math" + "reflect" "strconv" "time" + + "github.com/denisenkom/go-mssqldb/internal/cp" ) // fixed-length data types @@ -66,6 +69,9 @@ const ( typeNText = 0x63 typeVariant = 0x62 ) +const _PLP_NULL = 0xFFFFFFFFFFFFFFFF +const _UNKNOWN_PLP_LEN = 0xFFFFFFFFFFFFFFFE +const _PLP_TERMINATOR = 0x00000000 // TYPE_INFO rule // http://msdn.microsoft.com/en-us/library/dd358284.aspx @@ -75,11 +81,32 @@ type typeInfo struct { Scale uint8 Prec uint8 Buffer []byte - Collation collation + Collation cp.Collation + UdtInfo udtInfo + XmlInfo xmlInfo Reader func(ti *typeInfo, r *tdsBuffer) (res interface{}) Writer func(w io.Writer, ti typeInfo, buf []byte) (err error) } +// Common Language Runtime (CLR) Instances +// http://msdn.microsoft.com/en-us/library/dd357962.aspx +type udtInfo struct { + //MaxByteSize uint32 + DBName string + SchemaName string + TypeName string + AssemblyQualifiedName string +} + +// XML Values +// http://msdn.microsoft.com/en-us/library/dd304764.aspx +type xmlInfo struct { + SchemaPresent uint8 + DBName string + OwningSchema string + XmlSchemaCollection string +} + func readTypeInfo(r *tdsBuffer) (res typeInfo) { res.TypeId = r.byte() switch res.TypeId { @@ -106,6 +133,7 @@ func readTypeInfo(r *tdsBuffer) (res typeInfo) { return } +// https://msdn.microsoft.com/en-us/library/dd358284.aspx func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) { err = binary.Write(w, binary.LittleEndian, ti.TypeId) if err != nil { @@ -114,7 +142,9 @@ func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) { switch ti.TypeId { case typeNull, typeInt1, typeBit, typeInt2, typeInt4, typeDateTim4, typeFlt4, typeMoney, typeDateTime, typeFlt8, typeMoney4, typeInt8: - // those are fixed length types + // those are fixed length + // https://msdn.microsoft.com/en-us/library/dd341171.aspx + ti.Writer = writeFixedType default: // all others are VARLENTYPE err = writeVarLen(w, 
ti) if err != nil { @@ -124,19 +154,26 @@ func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) { return } +func writeFixedType(w io.Writer, ti typeInfo, buf []byte) (err error) { + _, err = w.Write(buf) + return +} + +// https://msdn.microsoft.com/en-us/library/dd358341.aspx func writeVarLen(w io.Writer, ti *typeInfo) (err error) { switch ti.TypeId { case typeDateN: - + ti.Writer = writeByteLenType case typeTimeN, typeDateTime2N, typeDateTimeOffsetN: if err = binary.Write(w, binary.LittleEndian, ti.Scale); err != nil { return } ti.Writer = writeByteLenType - case typeGuid, typeIntN, typeDecimal, typeNumeric, + case typeIntN, typeDecimal, typeNumeric, typeBitN, typeDecimalN, typeNumericN, typeFltN, typeMoneyN, typeDateTimeN, typeChar, typeVarChar, typeBinary, typeVarBinary: + // byle len types if ti.Size > 0xff { panic("Invalid size for BYLELEN_TYPE") @@ -156,6 +193,14 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) { } } ti.Writer = writeByteLenType + case typeGuid: + if !(ti.Size == 0x10 || ti.Size == 0x00) { + panic("Invalid size for BYLELEN_TYPE") + } + if err = binary.Write(w, binary.LittleEndian, uint8(ti.Size)); err != nil { + return + } + ti.Writer = writeByteLenType case typeBigVarBin, typeBigVarChar, typeBigBinary, typeBigChar, typeNVarChar, typeNChar, typeXml, typeUdt: // short len types @@ -176,14 +221,19 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) { return } case typeXml: - var schemapresent uint8 = 0 - if err = binary.Write(w, binary.LittleEndian, schemapresent); err != nil { + if err = binary.Write(w, binary.LittleEndian, ti.XmlInfo.SchemaPresent); err != nil { return } } case typeText, typeImage, typeNText, typeVariant: // LONGLEN_TYPE - panic("LONGLEN_TYPE not implemented") + if err = binary.Write(w, binary.LittleEndian, uint32(ti.Size)); err != nil { + return + } + if err = writeCollation(w, ti.Collation); err != nil { + return + } + ti.Writer = writeLongLenType default: panic("Invalid type") } @@ -198,6 +248,48 @@ func decodeDateTim4(buf []byte) time.Time { 0, int(mins), 0, 0, time.UTC) } +func encodeDateTim4(val time.Time) (buf []byte) { + buf = make([]byte, 4) + + ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) + dur := val.Sub(ref) + days := dur / (24 * time.Hour) + mins := val.Hour()*60 + val.Minute() + if days < 0 { + days = 0 + mins = 0 + } + + binary.LittleEndian.PutUint16(buf[:2], uint16(days)) + binary.LittleEndian.PutUint16(buf[2:], uint16(mins)) + return +} + +// encodes datetime value +// type identifier is typeDateTimeN +func encodeDateTime(t time.Time) (res []byte) { + // base date in days since Jan 1st 1900 + basedays := gregorianDays(1900, 1) + // days since Jan 1st 1900 (same TZ as t) + days := gregorianDays(t.Year(), t.YearDay()) - basedays + tm := 300*(t.Second()+t.Minute()*60+t.Hour()*60*60) + t.Nanosecond()*300/1e9 + // minimum and maximum possible + mindays := gregorianDays(1753, 1) - basedays + maxdays := gregorianDays(9999, 365) - basedays + if days < mindays { + days = mindays + tm = 0 + } + if days > maxdays { + days = maxdays + tm = (23*60*60+59*60+59)*300 + 299 + } + res = make([]byte, 8) + binary.LittleEndian.PutUint32(res[0:4], uint32(days)) + binary.LittleEndian.PutUint32(res[4:8], uint32(tm)) + return +} + func decodeDateTime(buf []byte) time.Time { days := int32(binary.LittleEndian.Uint32(buf)) tm := binary.LittleEndian.Uint32(buf[4:]) @@ -207,7 +299,7 @@ func decodeDateTime(buf []byte) time.Time { 0, 0, secs, ns, time.UTC) } -func readFixedType(ti *typeInfo, r *tdsBuffer) (res interface{}) { +func 
readFixedType(ti *typeInfo, r *tdsBuffer) interface{} { r.ReadFull(ti.Buffer) buf := ti.Buffer switch ti.TypeId { @@ -241,12 +333,7 @@ func readFixedType(ti *typeInfo, r *tdsBuffer) (res interface{}) { panic("shoulnd't get here") } -func writeFixedType(w io.Writer, ti typeInfo, buf []byte) (err error) { - _, err = w.Write(buf) - return -} - -func readByteLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { +func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} { size := r.byte() if size == 0 { return nil @@ -278,7 +365,7 @@ func readByteLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { case 8: return int64(binary.LittleEndian.Uint64(buf)) default: - badStreamPanicf("Invalid size for INTNTYPE") + badStreamPanicf("Invalid size for INTNTYPE: %d", len(buf)) } case typeDecimal, typeNumeric, typeDecimalN, typeNumericN: return decodeDecimal(ti.Prec, ti.Scale, buf) @@ -305,6 +392,10 @@ func readByteLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { default: badStreamPanicf("Invalid size for MONEYNTYPE") } + case typeDateTim4: + return decodeDateTim4(buf) + case typeDateTime: + return decodeDateTime(buf) case typeDateTimeN: switch len(buf) { case 4: @@ -333,7 +424,7 @@ func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { if ti.Size > 0xff { panic("Invalid size for BYTELEN_TYPE") } - err = binary.Write(w, binary.LittleEndian, uint8(ti.Size)) + err = binary.Write(w, binary.LittleEndian, uint8(len(buf))) if err != nil { return } @@ -341,7 +432,7 @@ func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { return } -func readShortLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { +func readShortLenType(ti *typeInfo, r *tdsBuffer) interface{} { size := r.uint16() if size == 0xffff { return nil @@ -384,7 +475,7 @@ func writeShortLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { return } -func readLongLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { +func readLongLenType(ti *typeInfo, r *tdsBuffer) interface{} { // information about this format can be found here: // http://msdn.microsoft.com/en-us/library/dd304783.aspx // and here: @@ -415,10 +506,51 @@ func readLongLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { } panic("shoulnd't get here") } +func writeLongLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { + //textptr + err = binary.Write(w, binary.LittleEndian, byte(0x10)) + if err != nil { + return + } + err = binary.Write(w, binary.LittleEndian, uint64(0xFFFFFFFFFFFFFFFF)) + if err != nil { + return + } + err = binary.Write(w, binary.LittleEndian, uint64(0xFFFFFFFFFFFFFFFF)) + if err != nil { + return + } + //timestamp? 
+ err = binary.Write(w, binary.LittleEndian, uint64(0xFFFFFFFFFFFFFFFF)) + if err != nil { + return + } + + err = binary.Write(w, binary.LittleEndian, uint32(ti.Size)) + if err != nil { + return + } + _, err = w.Write(buf) + return +} + +func readCollation(r *tdsBuffer) (res cp.Collation) { + res.LcidAndFlags = r.uint32() + res.SortId = r.byte() + return +} + +func writeCollation(w io.Writer, col cp.Collation) (err error) { + if err = binary.Write(w, binary.LittleEndian, col.LcidAndFlags); err != nil { + return + } + err = binary.Write(w, binary.LittleEndian, col.SortId) + return +} // reads variant value // http://msdn.microsoft.com/en-us/library/dd303302.aspx -func readVariantType(ti *typeInfo, r *tdsBuffer) (res interface{}) { +func readVariantType(ti *typeInfo, r *tdsBuffer) interface{} { size := r.int32() if size == 0 { return nil @@ -510,14 +642,14 @@ func readVariantType(ti *typeInfo, r *tdsBuffer) (res interface{}) { // partially length prefixed stream // http://msdn.microsoft.com/en-us/library/dd340469.aspx -func readPLPType(ti *typeInfo, r *tdsBuffer) (res interface{}) { +func readPLPType(ti *typeInfo, r *tdsBuffer) interface{} { size := r.uint64() var buf *bytes.Buffer switch size { - case 0xffffffffffffffff: + case _PLP_NULL: // null return nil - case 0xfffffffffffffffe: + case _UNKNOWN_PLP_LEN: // size unknown buf = bytes.NewBuffer(make([]byte, 0, 1000)) default: @@ -548,15 +680,16 @@ func readPLPType(ti *typeInfo, r *tdsBuffer) (res interface{}) { } func writePLPType(w io.Writer, ti typeInfo, buf []byte) (err error) { - if err = binary.Write(w, binary.LittleEndian, uint64(len(buf))); err != nil { + if err = binary.Write(w, binary.LittleEndian, uint64(_UNKNOWN_PLP_LEN)); err != nil { return } for { chunksize := uint32(len(buf)) - if err = binary.Write(w, binary.LittleEndian, chunksize); err != nil { + if chunksize == 0 { + err = binary.Write(w, binary.LittleEndian, uint32(_PLP_TERMINATOR)) return } - if chunksize == 0 { + if err = binary.Write(w, binary.LittleEndian, chunksize); err != nil { return } if _, err = w.Write(buf[:chunksize]); err != nil { @@ -606,19 +739,27 @@ func readVarLen(ti *typeInfo, r *tdsBuffer) { } ti.Reader = readByteLenType case typeXml: - schemapresent := r.byte() - if schemapresent != 0 { - // just ignore this for now + ti.XmlInfo.SchemaPresent = r.byte() + if ti.XmlInfo.SchemaPresent != 0 { // dbname - r.BVarChar() + ti.XmlInfo.DBName = r.BVarChar() // owning schema - r.BVarChar() + ti.XmlInfo.OwningSchema = r.BVarChar() // xml schema collection - r.UsVarChar() + ti.XmlInfo.XmlSchemaCollection = r.UsVarChar() } ti.Reader = readPLPType + case typeUdt: + ti.Size = int(r.uint16()) + ti.UdtInfo.DBName = r.BVarChar() + ti.UdtInfo.SchemaName = r.BVarChar() + ti.UdtInfo.TypeName = r.BVarChar() + ti.UdtInfo.AssemblyQualifiedName = r.UsVarChar() + + ti.Buffer = make([]byte, ti.Size) + ti.Reader = readPLPType case typeBigVarBin, typeBigVarChar, typeBigBinary, typeBigChar, - typeNVarChar, typeNChar, typeUdt: + typeNVarChar, typeNChar: // short len types ti.Size = int(r.uint16()) switch ti.TypeId { @@ -701,13 +842,23 @@ func decodeDecimal(prec uint8, scale uint8, buf []byte) []byte { // http://msdn.microsoft.com/en-us/library/ee780895.aspx func decodeDateInt(buf []byte) (days int) { - return int(buf[0]) + int(buf[1])*256 + int(buf[2])*256*256 + days = int(buf[0]) + int(buf[1])*256 + int(buf[2])*256*256 + return } func decodeDate(buf []byte) time.Time { return time.Date(1, 1, 1+decodeDateInt(buf), 0, 0, 0, 0, time.UTC) } +func encodeDate(val time.Time) (buf 
[]byte) { + days, _, _ := dateTime2(val) + buf = make([]byte, 3) + buf[0] = byte(days) + buf[1] = byte(days >> 8) + buf[2] = byte(days >> 16) + return +} + func decodeTimeInt(scale uint8, buf []byte) (sec int, ns int) { var acc uint64 = 0 for i := len(buf) - 1; i >= 0; i-- { @@ -723,11 +874,41 @@ func decodeTimeInt(scale uint8, buf []byte) (sec int, ns int) { return } +// calculate size of time field in bytes +func calcTimeSize(scale int) int { + if scale <= 2 { + return 3 + } else if scale <= 4 { + return 4 + } else { + return 5 + } +} + +// writes time value into a field buffer +// buffer should be at least calcTimeSize long +func encodeTimeInt(seconds, ns, scale int, buf []byte) { + ns_total := int64(seconds)*1000*1000*1000 + int64(ns) + t := ns_total / int64(math.Pow10(int(scale)*-1)*1e9) + buf[0] = byte(t) + buf[1] = byte(t >> 8) + buf[2] = byte(t >> 16) + buf[3] = byte(t >> 24) + buf[4] = byte(t >> 32) +} + func decodeTime(scale uint8, buf []byte) time.Time { sec, ns := decodeTimeInt(scale, buf) return time.Date(1, 1, 1, 0, 0, sec, ns, time.UTC) } +func encodeTime(hour, minute, second, ns, scale int) (buf []byte) { + seconds := hour*3600 + minute*60 + second + buf = make([]byte, calcTimeSize(scale)) + encodeTimeInt(seconds, ns, scale, buf) + return +} + func decodeDateTime2(scale uint8, buf []byte) time.Time { timesize := len(buf) - 3 sec, ns := decodeTimeInt(scale, buf[:timesize]) @@ -735,6 +916,17 @@ func decodeDateTime2(scale uint8, buf []byte) time.Time { return time.Date(1, 1, 1+days, 0, 0, sec, ns, time.UTC) } +func encodeDateTime2(val time.Time, scale int) (buf []byte) { + days, seconds, ns := dateTime2(val) + timesize := calcTimeSize(scale) + buf = make([]byte, 3+timesize) + encodeTimeInt(seconds, ns, scale, buf) + buf[timesize] = byte(days) + buf[timesize+1] = byte(days >> 8) + buf[timesize+2] = byte(days >> 16) + return +} + func decodeDateTimeOffset(scale uint8, buf []byte) time.Time { timesize := len(buf) - 3 - 2 sec, ns := decodeTimeInt(scale, buf[:timesize]) @@ -746,29 +938,48 @@ func decodeDateTimeOffset(scale uint8, buf []byte) time.Time { time.FixedZone("", offset*60)) } -func divFloor(x int64, y int64) int64 { - q := x / y - r := x % y - if r != 0 && ((r < 0) != (y < 0)) { - q-- - } - return q +func encodeDateTimeOffset(val time.Time, scale int) (buf []byte) { + timesize := calcTimeSize(scale) + buf = make([]byte, timesize+2+3) + days, seconds, ns := dateTime2(val.In(time.UTC)) + encodeTimeInt(seconds, ns, scale, buf) + buf[timesize] = byte(days) + buf[timesize+1] = byte(days >> 8) + buf[timesize+2] = byte(days >> 16) + _, offset := val.Zone() + offset /= 60 + buf[timesize+3] = byte(offset) + buf[timesize+4] = byte(offset >> 8) + return } -func dateTime2(t time.Time) (days int32, ns int64) { - // number of days since Jan 1 1970 UTC - days64 := divFloor(t.Unix(), 24*60*60) - // number of days since Jan 1 1 UTC - days = int32(days64) + 1969*365 + 1969/4 - 1969/100 + 1969/400 - // number of seconds within day - secs := t.Unix() - days64*24*60*60 - // number of nanoseconds within day - ns = secs*1e9 + int64(t.Nanosecond()) +// returns days since Jan 1st 0001 in Gregorian calendar +func gregorianDays(year, yearday int) int { + year0 := year - 1 + return year0*365 + year0/4 - year0/100 + year0/400 + yearday - 1 +} + +func dateTime2(t time.Time) (days int, seconds int, ns int) { + // days since Jan 1 1 (in same TZ as t) + days = gregorianDays(t.Year(), t.YearDay()) + seconds = t.Second() + t.Minute()*60 + t.Hour()*60*60 + ns = t.Nanosecond() + if days < 0 { + days = 0 + 
seconds = 0 + ns = 0 + } + max := gregorianDays(9999, 365) + if days > max { + days = max + seconds = 59 + 59*60 + 23*60*60 + ns = 999999900 + } return } -func decodeChar(col collation, buf []byte) string { - return charset2utf8(col, buf) +func decodeChar(col cp.Collation, buf []byte) string { + return cp.CharsetToUTF8(col, buf) } func decodeUcs2(buf []byte) string { @@ -787,12 +998,129 @@ func decodeXml(ti typeInfo, buf []byte) string { return decodeUcs2(buf) } -func decodeUdt(ti typeInfo, buf []byte) int { - panic("Not implemented") +func decodeUdt(ti typeInfo, buf []byte) []byte { + return buf +} + +// makes go/sql type instance as described below +// It should return +// the value type that can be used to scan types into. For example, the database +// column type "bigint" this should return "reflect.TypeOf(int64(0))". +func makeGoLangScanType(ti typeInfo) reflect.Type { + switch ti.TypeId { + case typeInt1: + return reflect.TypeOf(int64(0)) + case typeInt2: + return reflect.TypeOf(int64(0)) + case typeInt4: + return reflect.TypeOf(int64(0)) + case typeInt8: + return reflect.TypeOf(int64(0)) + case typeFlt4: + return reflect.TypeOf(float64(0)) + case typeIntN: + switch ti.Size { + case 1: + return reflect.TypeOf(int64(0)) + case 2: + return reflect.TypeOf(int64(0)) + case 4: + return reflect.TypeOf(int64(0)) + case 8: + return reflect.TypeOf(int64(0)) + default: + panic("invalid size of INTNTYPE") + } + case typeFlt8: + return reflect.TypeOf(float64(0)) + case typeFltN: + switch ti.Size { + case 4: + return reflect.TypeOf(float64(0)) + case 8: + return reflect.TypeOf(float64(0)) + default: + panic("invalid size of FLNNTYPE") + } + case typeBigVarBin: + return reflect.TypeOf([]byte{}) + case typeVarChar: + return reflect.TypeOf("") + case typeNVarChar: + return reflect.TypeOf("") + case typeBit, typeBitN: + return reflect.TypeOf(true) + case typeDecimalN, typeNumericN: + return reflect.TypeOf([]byte{}) + case typeMoney, typeMoney4, typeMoneyN: + switch ti.Size { + case 4: + return reflect.TypeOf([]byte{}) + case 8: + return reflect.TypeOf([]byte{}) + default: + panic("invalid size of MONEYN") + } + case typeDateTim4: + return reflect.TypeOf(time.Time{}) + case typeDateTime: + return reflect.TypeOf(time.Time{}) + case typeDateTimeN: + switch ti.Size { + case 4: + return reflect.TypeOf(time.Time{}) + case 8: + return reflect.TypeOf(time.Time{}) + default: + panic("invalid size of DATETIMEN") + } + case typeDateTime2N: + return reflect.TypeOf(time.Time{}) + case typeDateN: + return reflect.TypeOf(time.Time{}) + case typeTimeN: + return reflect.TypeOf(time.Time{}) + case typeDateTimeOffsetN: + return reflect.TypeOf(time.Time{}) + case typeBigVarChar: + return reflect.TypeOf("") + case typeBigChar: + return reflect.TypeOf("") + case typeNChar: + return reflect.TypeOf("") + case typeGuid: + return reflect.TypeOf([]byte{}) + case typeXml: + return reflect.TypeOf("") + case typeText: + return reflect.TypeOf("") + case typeNText: + return reflect.TypeOf("") + case typeImage: + return reflect.TypeOf([]byte{}) + case typeBigBinary: + return reflect.TypeOf([]byte{}) + case typeVariant: + return reflect.TypeOf(nil) + default: + panic(fmt.Sprintf("not implemented makeGoLangScanType for type %d", ti.TypeId)) + } } func makeDecl(ti typeInfo) string { switch ti.TypeId { + case typeNull: + // maybe we should use something else here + // this is tested in TestNull + return "nvarchar(1)" + case typeInt1: + return "tinyint" + case typeBigBinary: + return fmt.Sprintf("binary(%d)", ti.Size) + case typeInt2: + 
return "smallint" + case typeInt4: + return "int" case typeInt8: return "bigint" case typeFlt4: @@ -821,25 +1149,418 @@ func makeDecl(ti typeInfo) string { default: panic("invalid size of FLNNTYPE") } + case typeDecimal, typeDecimalN: + return fmt.Sprintf("decimal(%d, %d)", ti.Prec, ti.Scale) + case typeNumeric, typeNumericN: + return fmt.Sprintf("numeric(%d, %d)", ti.Prec, ti.Scale) + case typeMoney4: + return "smallmoney" + case typeMoney: + return "money" + case typeMoneyN: + switch ti.Size { + case 4: + return "smallmoney" + case 8: + return "money" + default: + panic("invalid size of MONEYNTYPE") + } case typeBigVarBin: if ti.Size > 8000 || ti.Size == 0 { - return fmt.Sprintf("varbinary(max)") + return "varbinary(max)" } else { return fmt.Sprintf("varbinary(%d)", ti.Size) } + case typeNChar: + return fmt.Sprintf("nchar(%d)", ti.Size/2) + case typeBigChar, typeChar: + return fmt.Sprintf("char(%d)", ti.Size) + case typeBigVarChar, typeVarChar: + if ti.Size > 4000 || ti.Size == 0 { + return fmt.Sprintf("varchar(max)") + } else { + return fmt.Sprintf("varchar(%d)", ti.Size) + } case typeNVarChar: if ti.Size > 8000 || ti.Size == 0 { - return fmt.Sprintf("nvarchar(max)") + return "nvarchar(max)" } else { return fmt.Sprintf("nvarchar(%d)", ti.Size/2) } case typeBit, typeBitN: return "bit" - case typeDateTimeN: + case typeDateN: + return "date" + case typeDateTim4: + return "smalldatetime" + case typeDateTime: return "datetime" + case typeDateTimeN: + switch ti.Size { + case 4: + return "smalldatetime" + case 8: + return "datetime" + default: + panic("invalid size of DATETIMNTYPE") + } + case typeTimeN: + return "time" + case typeDateTime2N: + return fmt.Sprintf("datetime2(%d)", ti.Scale) case typeDateTimeOffsetN: return fmt.Sprintf("datetimeoffset(%d)", ti.Scale) + case typeText: + return "text" + case typeNText: + return "ntext" + case typeUdt: + return ti.UdtInfo.TypeName + case typeGuid: + return "uniqueidentifier" + default: + panic(fmt.Sprintf("not implemented makeDecl for type %#x", ti.TypeId)) + } +} + +// makes go/sql type name as described below +// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the +// database system type name without the length. Type names should be uppercase. +// Examples of returned types: "VARCHAR", "NVARCHAR", "VARCHAR2", "CHAR", "TEXT", +// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML", +// "TIMESTAMP". 
+func makeGoLangTypeName(ti typeInfo) string { + switch ti.TypeId { + case typeInt1: + return "TINYINT" + case typeInt2: + return "SMALLINT" + case typeInt4: + return "INT" + case typeInt8: + return "BIGINT" + case typeFlt4: + return "REAL" + case typeIntN: + switch ti.Size { + case 1: + return "TINYINT" + case 2: + return "SMALLINT" + case 4: + return "INT" + case 8: + return "BIGINT" + default: + panic("invalid size of INTNTYPE") + } + case typeFlt8: + return "FLOAT" + case typeFltN: + switch ti.Size { + case 4: + return "REAL" + case 8: + return "FLOAT" + default: + panic("invalid size of FLNNTYPE") + } + case typeBigVarBin: + return "VARBINARY" + case typeVarChar: + return "VARCHAR" + case typeNVarChar: + return "NVARCHAR" + case typeBit, typeBitN: + return "BIT" + case typeDecimalN, typeNumericN: + return "DECIMAL" + case typeMoney, typeMoney4, typeMoneyN: + switch ti.Size { + case 4: + return "SMALLMONEY" + case 8: + return "MONEY" + default: + panic("invalid size of MONEYN") + } + case typeDateTim4: + return "SMALLDATETIME" + case typeDateTime: + return "DATETIME" + case typeDateTimeN: + switch ti.Size { + case 4: + return "SMALLDATETIME" + case 8: + return "DATETIME" + default: + panic("invalid size of DATETIMEN") + } + case typeDateTime2N: + return "DATETIME2" + case typeDateN: + return "DATE" + case typeTimeN: + return "TIME" + case typeDateTimeOffsetN: + return "DATETIMEOFFSET" + case typeBigVarChar: + return "VARCHAR" + case typeBigChar: + return "CHAR" + case typeNChar: + return "NCHAR" + case typeGuid: + return "UNIQUEIDENTIFIER" + case typeXml: + return "XML" + case typeText: + return "TEXT" + case typeNText: + return "NTEXT" + case typeImage: + return "IMAGE" + case typeVariant: + return "SQL_VARIANT" + case typeBigBinary: + return "BINARY" + default: + panic(fmt.Sprintf("not implemented makeGoLangTypeName for type %d", ti.TypeId)) + } +} + +// makes go/sql type length as described below +// It should return the length +// of the column type if the column is a variable length type. If the column is +// not a variable length type ok should return false. +// If length is not limited other than system limits, it should return math.MaxInt64. 
+// The following are examples of returned values for various types: +// TEXT (math.MaxInt64, true) +// varchar(10) (10, true) +// nvarchar(10) (10, true) +// decimal (0, false) +// int (0, false) +// bytea(30) (30, true) +func makeGoLangTypeLength(ti typeInfo) (int64, bool) { + switch ti.TypeId { + case typeInt1: + return 0, false + case typeInt2: + return 0, false + case typeInt4: + return 0, false + case typeInt8: + return 0, false + case typeFlt4: + return 0, false + case typeIntN: + switch ti.Size { + case 1: + return 0, false + case 2: + return 0, false + case 4: + return 0, false + case 8: + return 0, false + default: + panic("invalid size of INTNTYPE") + } + case typeFlt8: + return 0, false + case typeFltN: + switch ti.Size { + case 4: + return 0, false + case 8: + return 0, false + default: + panic("invalid size of FLNNTYPE") + } + case typeBit, typeBitN: + return 0, false + case typeDecimalN, typeNumericN: + return 0, false + case typeMoney, typeMoney4, typeMoneyN: + switch ti.Size { + case 4: + return 0, false + case 8: + return 0, false + default: + panic("invalid size of MONEYN") + } + case typeDateTim4, typeDateTime: + return 0, false + case typeDateTimeN: + switch ti.Size { + case 4: + return 0, false + case 8: + return 0, false + default: + panic("invalid size of DATETIMEN") + } + case typeDateTime2N: + return 0, false + case typeDateN: + return 0, false + case typeTimeN: + return 0, false + case typeDateTimeOffsetN: + return 0, false + case typeBigVarBin: + if ti.Size == 0xffff { + return 2147483645, true + } else { + return int64(ti.Size), true + } + case typeVarChar: + return int64(ti.Size), true + case typeBigVarChar: + if ti.Size == 0xffff { + return 2147483645, true + } else { + return int64(ti.Size), true + } + case typeBigChar: + return int64(ti.Size), true + case typeNVarChar: + if ti.Size == 0xffff { + return 2147483645 / 2, true + } else { + return int64(ti.Size) / 2, true + } + case typeNChar: + return int64(ti.Size) / 2, true + case typeGuid: + return 0, false + case typeXml: + return 1073741822, true + case typeText: + return 2147483647, true + case typeNText: + return 1073741823, true + case typeImage: + return 2147483647, true + case typeVariant: + return 0, false + case typeBigBinary: + return 0, false + default: + panic(fmt.Sprintf("not implemented makeGoLangTypeLength for type %d", ti.TypeId)) + } +} + +// makes go/sql type precision and scale as described below +// It should return the length +// of the column type if the column is a variable length type. If the column is +// not a variable length type ok should return false. +// If length is not limited other than system limits, it should return math.MaxInt64. 
+// The following are examples of returned values for various types: +// TEXT (math.MaxInt64, true) +// varchar(10) (10, true) +// nvarchar(10) (10, true) +// decimal (0, false) +// int (0, false) +// bytea(30) (30, true) +func makeGoLangTypePrecisionScale(ti typeInfo) (int64, int64, bool) { + switch ti.TypeId { + case typeInt1: + return 0, 0, false + case typeInt2: + return 0, 0, false + case typeInt4: + return 0, 0, false + case typeInt8: + return 0, 0, false + case typeFlt4: + return 0, 0, false + case typeIntN: + switch ti.Size { + case 1: + return 0, 0, false + case 2: + return 0, 0, false + case 4: + return 0, 0, false + case 8: + return 0, 0, false + default: + panic("invalid size of INTNTYPE") + } + case typeFlt8: + return 0, 0, false + case typeFltN: + switch ti.Size { + case 4: + return 0, 0, false + case 8: + return 0, 0, false + default: + panic("invalid size of FLNNTYPE") + } + case typeBit, typeBitN: + return 0, 0, false + case typeDecimalN, typeNumericN: + return int64(ti.Prec), int64(ti.Scale), true + case typeMoney, typeMoney4, typeMoneyN: + switch ti.Size { + case 4: + return 0, 0, false + case 8: + return 0, 0, false + default: + panic("invalid size of MONEYN") + } + case typeDateTim4, typeDateTime: + return 0, 0, false + case typeDateTimeN: + switch ti.Size { + case 4: + return 0, 0, false + case 8: + return 0, 0, false + default: + panic("invalid size of DATETIMEN") + } + case typeDateTime2N: + return 0, 0, false + case typeDateN: + return 0, 0, false + case typeTimeN: + return 0, 0, false + case typeDateTimeOffsetN: + return 0, 0, false + case typeBigVarBin: + return 0, 0, false + case typeVarChar: + return 0, 0, false + case typeBigVarChar: + return 0, 0, false + case typeBigChar: + return 0, 0, false + case typeNVarChar: + return 0, 0, false + case typeNChar: + return 0, 0, false + case typeGuid: + return 0, 0, false + case typeXml: + return 0, 0, false + case typeText: + return 0, 0, false + case typeNText: + return 0, 0, false + case typeImage: + return 0, 0, false + case typeVariant: + return 0, 0, false + case typeBigBinary: + return 0, 0, false default: - panic(fmt.Sprintf("not implemented makeDecl for type %d", ti.TypeId)) + panic(fmt.Sprintf("not implemented makeGoLangTypePrecisionScale for type %d", ti.TypeId)) } } diff --git a/vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go b/vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go new file mode 100644 index 0000000000000..c8ef3149b19f0 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go @@ -0,0 +1,74 @@ +package mssql + +import ( + "database/sql/driver" + "encoding/hex" + "errors" + "fmt" +) + +type UniqueIdentifier [16]byte + +func (u *UniqueIdentifier) Scan(v interface{}) error { + reverse := func(b []byte) { + for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { + b[i], b[j] = b[j], b[i] + } + } + + switch vt := v.(type) { + case []byte: + if len(vt) != 16 { + return errors.New("mssql: invalid UniqueIdentifier length") + } + + var raw UniqueIdentifier + + copy(raw[:], vt) + + reverse(raw[0:4]) + reverse(raw[4:6]) + reverse(raw[6:8]) + *u = raw + + return nil + case string: + if len(vt) != 36 { + return errors.New("mssql: invalid UniqueIdentifier string length") + } + + b := []byte(vt) + for i, c := range b { + switch c { + case '-': + b = append(b[:i], b[i+1:]...) 
+ } + } + + _, err := hex.Decode(u[:], []byte(b)) + return err + default: + return fmt.Errorf("mssql: cannot convert %T to UniqueIdentifier", v) + } +} + +func (u UniqueIdentifier) Value() (driver.Value, error) { + reverse := func(b []byte) { + for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { + b[i], b[j] = b[j], b[i] + } + } + + raw := make([]byte, len(u)) + copy(raw, u[:]) + + reverse(raw[0:4]) + reverse(raw[4:6]) + reverse(raw[6:8]) + + return raw, nil +} + +func (u UniqueIdentifier) String() string { + return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) +} diff --git a/vendor/github.com/go-sql-driver/mysql/.travis.yml b/vendor/github.com/go-sql-driver/mysql/.travis.yml index 75505f1440ea2..cc1268c3613bf 100644 --- a/vendor/github.com/go-sql-driver/mysql/.travis.yml +++ b/vendor/github.com/go-sql-driver/mysql/.travis.yml @@ -1,10 +1,10 @@ sudo: false language: go go: + - 1.7.x - 1.8.x - 1.9.x - 1.10.x - - 1.11.x - master before_install: diff --git a/vendor/github.com/go-sql-driver/mysql/AUTHORS b/vendor/github.com/go-sql-driver/mysql/AUTHORS index 5ce4f7eca1d1b..73ff68fbcf223 100644 --- a/vendor/github.com/go-sql-driver/mysql/AUTHORS +++ b/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -35,7 +35,6 @@ Hanno Braun Henri Yandell Hirotaka Yamamoto ICHINOSE Shogo -Ilia Cimpoes INADA Naoki Jacek Szwec James Harr @@ -73,9 +72,6 @@ Shuode Li Soroush Pour Stan Putrya Stanley Gunawan -Steven Hartland -Thomas Wodarek -Tom Jenkinson Xiangyu Hu Xiaobing Jiang Xiuming Chen @@ -91,4 +87,3 @@ Keybase Inc. Percona LLC Pivotal Inc. Stripe Inc. -Multiplay Ltd. diff --git a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md index 2d87d74c971be..ce1b5330a91a6 100644 --- a/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md +++ b/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -1,3 +1,14 @@ +## Version 1.4.1 (2018-11-14) + +Bugfixes: + + - Fix TIME format for binary columns (#818) + - Fix handling of empty auth plugin names (#835) + - Fix caching_sha2_password with empty password (#826) + - Fix canceled context broke mysqlConn (#862) + - Fix OldAuthSwitchRequest support (#870) + - Fix Auth Response packet for cleartext password (#887) + ## Version 1.4 (2018-06-03) Changes: diff --git a/vendor/github.com/go-sql-driver/mysql/README.md b/vendor/github.com/go-sql-driver/mysql/README.md index 341d9194c1e02..2e9b07eeb281f 100644 --- a/vendor/github.com/go-sql-driver/mysql/README.md +++ b/vendor/github.com/go-sql-driver/mysql/README.md @@ -40,7 +40,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac * Optional placeholder interpolation ## Requirements - * Go 1.8 or higher. We aim to support the 3 latest versions of Go. + * Go 1.7 or higher. We aim to support the 3 latest versions of Go. * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) --------------------------------------- @@ -328,11 +328,11 @@ Timeout for establishing connections, aka dial timeout. The value must be a deci ``` Type: bool / string -Valid Values: true, false, skip-verify, preferred, +Valid Values: true, false, skip-verify, Default: false ``` -`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. 
This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). ##### `writeTimeout` diff --git a/vendor/github.com/go-sql-driver/mysql/auth.go b/vendor/github.com/go-sql-driver/mysql/auth.go index fec7040d4a2d2..14f678a87b665 100644 --- a/vendor/github.com/go-sql-driver/mysql/auth.go +++ b/vendor/github.com/go-sql-driver/mysql/auth.go @@ -360,15 +360,13 @@ func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { pubKey := mc.cfg.pubKey if pubKey == nil { // request public key from server - data, err := mc.buf.takeSmallBuffer(4 + 1) - if err != nil { - return err - } + data := mc.buf.takeSmallBuffer(4 + 1) data[4] = cachingSha2PasswordRequestPublicKey mc.writePacket(data) // parse public key - if data, err = mc.readPacket(); err != nil { + data, err := mc.readPacket() + if err != nil { return err } diff --git a/vendor/github.com/go-sql-driver/mysql/buffer.go b/vendor/github.com/go-sql-driver/mysql/buffer.go index 19486bd6f6d59..eb4748bf448d6 100644 --- a/vendor/github.com/go-sql-driver/mysql/buffer.go +++ b/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -22,17 +22,17 @@ const defaultBufSize = 4096 // The buffer is similar to bufio.Reader / Writer but zero-copy-ish // Also highly optimized for this particular use case. type buffer struct { - buf []byte // buf is a byte buffer who's length and capacity are equal. + buf []byte nc net.Conn idx int length int timeout time.Duration } -// newBuffer allocates and returns a new buffer. func newBuffer(nc net.Conn) buffer { + var b [defaultBufSize]byte return buffer{ - buf: make([]byte, defaultBufSize), + buf: b[:], nc: nc, } } @@ -105,56 +105,43 @@ func (b *buffer) readNext(need int) ([]byte, error) { return b.buf[offset:b.idx], nil } -// takeBuffer returns a buffer with the requested size. +// returns a buffer with the requested size. // If possible, a slice from the existing buffer is returned. // Otherwise a bigger buffer is made. // Only one buffer (total) can be used at a time. -func (b *buffer) takeBuffer(length int) ([]byte, error) { +func (b *buffer) takeBuffer(length int) []byte { if b.length > 0 { - return nil, ErrBusyBuffer + return nil } // test (cheap) general case first - if length <= cap(b.buf) { - return b.buf[:length], nil + if length <= defaultBufSize || length <= cap(b.buf) { + return b.buf[:length] } if length < maxPacketSize { b.buf = make([]byte, length) - return b.buf, nil + return b.buf } - - // buffer is larger than we want to store. - return make([]byte, length), nil + return make([]byte, length) } -// takeSmallBuffer is shortcut which can be used if length is -// known to be smaller than defaultBufSize. +// shortcut which can be used if the requested buffer is guaranteed to be +// smaller than defaultBufSize // Only one buffer (total) can be used at a time. 
-func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { +func (b *buffer) takeSmallBuffer(length int) []byte { if b.length > 0 { - return nil, ErrBusyBuffer + return nil } - return b.buf[:length], nil + return b.buf[:length] } // takeCompleteBuffer returns the complete existing buffer. // This can be used if the necessary buffer size is unknown. -// cap and len of the returned buffer will be equal. // Only one buffer (total) can be used at a time. -func (b *buffer) takeCompleteBuffer() ([]byte, error) { - if b.length > 0 { - return nil, ErrBusyBuffer - } - return b.buf, nil -} - -// store stores buf, an updated buffer, if its suitable to do so. -func (b *buffer) store(buf []byte) error { +func (b *buffer) takeCompleteBuffer() []byte { if b.length > 0 { - return ErrBusyBuffer - } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) { - b.buf = buf[:cap(buf)] + return nil } - return nil + return b.buf } diff --git a/vendor/github.com/go-sql-driver/mysql/connection.go b/vendor/github.com/go-sql-driver/mysql/connection.go index fc4ec7597d90c..e57061412bc2d 100644 --- a/vendor/github.com/go-sql-driver/mysql/connection.go +++ b/vendor/github.com/go-sql-driver/mysql/connection.go @@ -9,8 +9,6 @@ package mysql import ( - "context" - "database/sql" "database/sql/driver" "io" "net" @@ -19,6 +17,16 @@ import ( "time" ) +// a copy of context.Context for Go 1.7 and earlier +type mysqlContext interface { + Done() <-chan struct{} + Err() error + + // defined in context.Context, but not used in this driver: + // Deadline() (deadline time.Time, ok bool) + // Value(key interface{}) interface{} +} + type mysqlConn struct { buf buffer netConn net.Conn @@ -35,7 +43,7 @@ type mysqlConn struct { // for context support (Go 1.8+) watching bool - watcher chan<- context.Context + watcher chan<- mysqlContext closech chan struct{} finished chan<- struct{} canceled atomicError // set non-nil if conn is canceled @@ -182,10 +190,10 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin return "", driver.ErrSkip } - buf, err := mc.buf.takeCompleteBuffer() - if err != nil { + buf := mc.buf.takeCompleteBuffer() + if buf == nil { // can not take the buffer. 
Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return "", ErrInvalidConn } buf = buf[:0] @@ -451,193 +459,3 @@ func (mc *mysqlConn) finish() { case <-mc.closech: } } - -// Ping implements driver.Pinger interface -func (mc *mysqlConn) Ping(ctx context.Context) (err error) { - if mc.closed.IsSet() { - errLog.Print(ErrInvalidConn) - return driver.ErrBadConn - } - - if err = mc.watchCancel(ctx); err != nil { - return - } - defer mc.finish() - - if err = mc.writeCommandPacket(comPing); err != nil { - return mc.markBadConn(err) - } - - return mc.readResultOK() -} - -// BeginTx implements driver.ConnBeginTx interface -func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - defer mc.finish() - - if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { - level, err := mapIsolationLevel(opts.Isolation) - if err != nil { - return nil, err - } - err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) - if err != nil { - return nil, err - } - } - - return mc.begin(opts.ReadOnly) -} - -func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - - rows, err := mc.query(query, dargs) - if err != nil { - mc.finish() - return nil, err - } - rows.finish = mc.finish - return rows, err -} - -func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - defer mc.finish() - - return mc.Exec(query, dargs) -} - -func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - if err := mc.watchCancel(ctx); err != nil { - return nil, err - } - - stmt, err := mc.Prepare(query) - mc.finish() - if err != nil { - return nil, err - } - - select { - default: - case <-ctx.Done(): - stmt.Close() - return nil, ctx.Err() - } - return stmt, nil -} - -func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := stmt.mc.watchCancel(ctx); err != nil { - return nil, err - } - - rows, err := stmt.query(dargs) - if err != nil { - stmt.mc.finish() - return nil, err - } - rows.finish = stmt.mc.finish - return rows, err -} - -func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { - dargs, err := namedValueToValue(args) - if err != nil { - return nil, err - } - - if err := stmt.mc.watchCancel(ctx); err != nil { - return nil, err - } - defer stmt.mc.finish() - - return stmt.Exec(dargs) -} - -func (mc *mysqlConn) watchCancel(ctx context.Context) error { - if mc.watching { - // Reach here if canceled, - // so the connection is already invalid - mc.cleanup() - return nil - } - // When ctx is already cancelled, don't watch it. - if err := ctx.Err(); err != nil { - return err - } - // When ctx is not cancellable, don't watch it. - if ctx.Done() == nil { - return nil - } - // When watcher is not alive, can't watch it. 
- if mc.watcher == nil { - return nil - } - - mc.watching = true - mc.watcher <- ctx - return nil -} - -func (mc *mysqlConn) startWatcher() { - watcher := make(chan context.Context, 1) - mc.watcher = watcher - finished := make(chan struct{}) - mc.finished = finished - go func() { - for { - var ctx context.Context - select { - case ctx = <-watcher: - case <-mc.closech: - return - } - - select { - case <-ctx.Done(): - mc.cancel(ctx.Err()) - case <-finished: - case <-mc.closech: - return - } - } - }() -} - -func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { - nv.Value, err = converter{}.ConvertValue(nv.Value) - return -} - -// ResetSession implements driver.SessionResetter. -// (From Go 1.10) -func (mc *mysqlConn) ResetSession(ctx context.Context) error { - if mc.closed.IsSet() { - return driver.ErrBadConn - } - return nil -} diff --git a/vendor/github.com/go-sql-driver/mysql/connection_go18.go b/vendor/github.com/go-sql-driver/mysql/connection_go18.go new file mode 100644 index 0000000000000..ce52c7d1671aa --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/connection_go18.go @@ -0,0 +1,207 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.8 + +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" +) + +// Ping implements driver.Pinger interface +func (mc *mysqlConn) Ping(ctx context.Context) (err error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + if err = mc.watchCancel(ctx); err != nil { + return + } + defer mc.finish() + + if err = mc.writeCommandPacket(comPing); err != nil { + return + } + + return mc.readResultOK() +} + +// BeginTx implements driver.ConnBeginTx interface +func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { + level, err := mapIsolationLevel(opts.Isolation) + if err != nil { + return nil, err + } + err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) + if err != nil { + return nil, err + } + } + + return mc.begin(opts.ReadOnly) +} + +func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := mc.query(query, dargs) + if err != nil { + mc.finish() + return nil, err + } + rows.finish = mc.finish + return rows, err +} + +func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + return mc.Exec(query, dargs) +} + +func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + stmt, err := mc.Prepare(query) + mc.finish() + if err != nil { + return nil, err + } + + select { + default: + case 
<-ctx.Done(): + stmt.Close() + return nil, ctx.Err() + } + return stmt, nil +} + +func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := stmt.query(dargs) + if err != nil { + stmt.mc.finish() + return nil, err + } + rows.finish = stmt.mc.finish + return rows, err +} + +func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + defer stmt.mc.finish() + + return stmt.Exec(dargs) +} + +func (mc *mysqlConn) watchCancel(ctx context.Context) error { + if mc.watching { + // Reach here if canceled, + // so the connection is already invalid + mc.cleanup() + return nil + } + // When ctx is already cancelled, don't watch it. + if err := ctx.Err(); err != nil { + return err + } + // When ctx is not cancellable, don't watch it. + if ctx.Done() == nil { + return nil + } + // When watcher is not alive, can't watch it. + if mc.watcher == nil { + return nil + } + + mc.watching = true + mc.watcher <- ctx + return nil +} + +func (mc *mysqlConn) startWatcher() { + watcher := make(chan mysqlContext, 1) + mc.watcher = watcher + finished := make(chan struct{}) + mc.finished = finished + go func() { + for { + var ctx mysqlContext + select { + case ctx = <-watcher: + case <-mc.closech: + return + } + + select { + case <-ctx.Done(): + mc.cancel(ctx.Err()) + case <-finished: + case <-mc.closech: + return + } + } + }() +} + +func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} + +// ResetSession implements driver.SessionResetter. +// (From Go 1.10) +func (mc *mysqlConn) ResetSession(ctx context.Context) error { + if mc.closed.IsSet() { + return driver.ErrBadConn + } + return nil +} diff --git a/vendor/github.com/go-sql-driver/mysql/driver.go b/vendor/github.com/go-sql-driver/mysql/driver.go index 9f4967087f518..e9ede2c8dad6a 100644 --- a/vendor/github.com/go-sql-driver/mysql/driver.go +++ b/vendor/github.com/go-sql-driver/mysql/driver.go @@ -23,6 +23,11 @@ import ( "sync" ) +// watcher interface is used for context support (From Go 1.8) +type watcher interface { + startWatcher() +} + // MySQLDriver is exported to make the driver directly accessible. // In general the driver is used via the database/sql package. type MySQLDriver struct{} @@ -50,7 +55,7 @@ func RegisterDial(net string, dial DialFunc) { // Open new Connection. 
// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how -// the DSN string is formatted +// the DSN string is formated func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { var err error @@ -77,10 +82,6 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { mc.netConn, err = nd.Dial(mc.cfg.Net, mc.cfg.Addr) } if err != nil { - if nerr, ok := err.(net.Error); ok && nerr.Temporary() { - errLog.Print("net.Error from Dial()': ", nerr.Error()) - return nil, driver.ErrBadConn - } return nil, err } @@ -95,7 +96,9 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { } // Call startWatcher for context support (From Go 1.8) - mc.startWatcher() + if s, ok := interface{}(mc).(watcher); ok { + s.startWatcher() + } mc.buf = newBuffer(mc.netConn) diff --git a/vendor/github.com/go-sql-driver/mysql/dsn.go b/vendor/github.com/go-sql-driver/mysql/dsn.go index b9134722eb0fc..be014babe3335 100644 --- a/vendor/github.com/go-sql-driver/mysql/dsn.go +++ b/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -560,7 +560,7 @@ func parseDSNParams(cfg *Config, params string) (err error) { } else { cfg.TLSConfig = "false" } - } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" { + } else if vl := strings.ToLower(value); vl == "skip-verify" { cfg.TLSConfig = vl cfg.tls = &tls.Config{InsecureSkipVerify: true} } else { diff --git a/vendor/github.com/go-sql-driver/mysql/packets.go b/vendor/github.com/go-sql-driver/mysql/packets.go index 5e0853767d5a5..9ed64085092e4 100644 --- a/vendor/github.com/go-sql-driver/mysql/packets.go +++ b/vendor/github.com/go-sql-driver/mysql/packets.go @@ -51,7 +51,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) { mc.sequence++ // packets with length 0 terminate a previous packet which is a - // multiple of (2^24)-1 bytes long + // multiple of (2^24)−1 bytes long if pktLen == 0 { // there was no previous packet if prevData == nil { @@ -194,11 +194,7 @@ func (mc *mysqlConn) readHandshakePacket() (data []byte, plugin string, err erro return nil, "", ErrOldProtocol } if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { - if mc.cfg.TLSConfig == "preferred" { - mc.cfg.tls = nil - } else { - return nil, "", ErrNoTLS - } + return nil, "", ErrNoTLS } pos += 2 @@ -290,10 +286,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string } // Calculate packet length and get buffer with that size - data, err := mc.buf.takeSmallBuffer(pktLen + 4) - if err != nil { + data := mc.buf.takeSmallBuffer(pktLen + 4) + if data == nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return errBadConnNoWrite } @@ -371,10 +367,10 @@ func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { pktLen := 4 + len(authData) - data, err := mc.buf.takeSmallBuffer(pktLen) - if err != nil { + data := mc.buf.takeSmallBuffer(pktLen) + if data == nil { // cannot take the buffer. 
Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return errBadConnNoWrite } @@ -391,10 +387,10 @@ func (mc *mysqlConn) writeCommandPacket(command byte) error { // Reset Packet Sequence mc.sequence = 0 - data, err := mc.buf.takeSmallBuffer(4 + 1) - if err != nil { + data := mc.buf.takeSmallBuffer(4 + 1) + if data == nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return errBadConnNoWrite } @@ -410,10 +406,10 @@ func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { mc.sequence = 0 pktLen := 1 + len(arg) - data, err := mc.buf.takeBuffer(pktLen + 4) - if err != nil { + data := mc.buf.takeBuffer(pktLen + 4) + if data == nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return errBadConnNoWrite } @@ -431,10 +427,10 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { // Reset Packet Sequence mc.sequence = 0 - data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) - if err != nil { + data := mc.buf.takeSmallBuffer(4 + 1 + 4) + if data == nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return errBadConnNoWrite } @@ -887,7 +883,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { const minPktLen = 4 + 1 + 4 + 1 + 4 mc := stmt.mc - // Determine threshold dynamically to avoid packet size shortage. + // Determine threshould dynamically to avoid packet size shortage. longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1) if longDataSize < 64 { longDataSize = 64 @@ -897,17 +893,15 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { mc.sequence = 0 var data []byte - var err error if len(args) == 0 { - data, err = mc.buf.takeBuffer(minPktLen) + data = mc.buf.takeBuffer(minPktLen) } else { - data, err = mc.buf.takeCompleteBuffer() - // In this case the len(data) == cap(data) which is used to optimise the flow below. + data = mc.buf.takeCompleteBuffer() } - if err != nil { + if data == nil { // cannot take the buffer. Something must be wrong with the connection - errLog.Print(err) + errLog.Print(ErrBusyBuffer) return errBadConnNoWrite } @@ -933,7 +927,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { pos := minPktLen var nullMask []byte - if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) { + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= len(data) { // buffer has to be extended but we don't know by how much so // we depend on append after all data with known sizes fit. // We stop at that because we deal with a lot of columns here @@ -942,11 +936,10 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { copy(tmp[:pos], data[:pos]) data = tmp nullMask = data[pos : pos+maskLen] - // No need to clean nullMask as make ensures that. pos += maskLen } else { nullMask = data[pos : pos+maskLen] - for i := range nullMask { + for i := 0; i < maskLen; i++ { nullMask[i] = 0 } pos += maskLen @@ -1083,10 +1076,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { // In that case we must build the data packet with the new values buffer if valuesCap != cap(paramValues) { data = append(data[:pos], paramValues...) 
- if err = mc.buf.store(data); err != nil { - errLog.Print(err) - return errBadConnNoWrite - } + mc.buf.buf = data } pos += len(paramValues) diff --git a/vendor/github.com/go-sql-driver/mysql/utils.go b/vendor/github.com/go-sql-driver/mysql/utils.go index cb3650bb9b8db..ca5d47d825d08 100644 --- a/vendor/github.com/go-sql-driver/mysql/utils.go +++ b/vendor/github.com/go-sql-driver/mysql/utils.go @@ -10,10 +10,8 @@ package mysql import ( "crypto/tls" - "database/sql" "database/sql/driver" "encoding/binary" - "errors" "fmt" "io" "strconv" @@ -82,7 +80,7 @@ func DeregisterTLSConfig(key string) { func getTLSConfigClone(key string) (config *tls.Config) { tlsConfigLock.RLock() if v, ok := tlsConfigRegistry[key]; ok { - config = v.Clone() + config = cloneTLSConfig(v) } tlsConfigLock.RUnlock() return @@ -726,30 +724,3 @@ func (ae *atomicError) Value() error { } return nil } - -func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { - dargs := make([]driver.Value, len(named)) - for n, param := range named { - if len(param.Name) > 0 { - // TODO: support the use of Named Parameters #561 - return nil, errors.New("mysql: driver does not support the use of Named Parameters") - } - dargs[n] = param.Value - } - return dargs, nil -} - -func mapIsolationLevel(level driver.IsolationLevel) (string, error) { - switch sql.IsolationLevel(level) { - case sql.LevelRepeatableRead: - return "REPEATABLE READ", nil - case sql.LevelReadCommitted: - return "READ COMMITTED", nil - case sql.LevelReadUncommitted: - return "READ UNCOMMITTED", nil - case sql.LevelSerializable: - return "SERIALIZABLE", nil - default: - return "", fmt.Errorf("mysql: unsupported isolation level: %v", level) - } -} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go17.go b/vendor/github.com/go-sql-driver/mysql/utils_go17.go new file mode 100644 index 0000000000000..f5956345674d6 --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_go17.go @@ -0,0 +1,40 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// +build go1.7 +// +build !go1.8 + +package mysql + +import "crypto/tls" + +func cloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/go-sql-driver/mysql/utils_go18.go b/vendor/github.com/go-sql-driver/mysql/utils_go18.go new file mode 100644 index 0000000000000..c35c2a6aabfef --- /dev/null +++ b/vendor/github.com/go-sql-driver/mysql/utils_go18.go @@ -0,0 +1,50 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.8 + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "errors" + "fmt" +) + +func cloneTLSConfig(c *tls.Config) *tls.Config { + return c.Clone() +} + +func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { + dargs := make([]driver.Value, len(named)) + for n, param := range named { + if len(param.Name) > 0 { + // TODO: support the use of Named Parameters #561 + return nil, errors.New("mysql: driver does not support the use of Named Parameters") + } + dargs[n] = param.Value + } + return dargs, nil +} + +func mapIsolationLevel(level driver.IsolationLevel) (string, error) { + switch sql.IsolationLevel(level) { + case sql.LevelRepeatableRead: + return "REPEATABLE READ", nil + case sql.LevelReadCommitted: + return "READ COMMITTED", nil + case sql.LevelReadUncommitted: + return "READ UNCOMMITTED", nil + case sql.LevelSerializable: + return "SERIALIZABLE", nil + default: + return "", fmt.Errorf("mysql: unsupported isolation level: %v", level) + } +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 2b8a569cba564..ee9974b3ba9a1 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,3 +1,5 @@ +# cloud.google.com/go v0.34.0 +cloud.google.com/go/civil # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml # github.com/PuerkitoBio/goquery v0.0.0-20170324135448-ed7d758e9a34 @@ -85,8 +87,9 @@ github.com/couchbase/vellum/utf8 github.com/couchbaselabs/go-couchbase # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 => github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 +# github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 github.com/denisenkom/go-mssqldb +github.com/denisenkom/go-mssqldb/internal/cp # github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac github.com/dgrijalva/jwt-go # github.com/edsrzf/mmap-go 
v0.0.0-20170320065105-0bce6a688712 @@ -147,7 +150,7 @@ github.com/go-redis/redis/internal/hashtag github.com/go-redis/redis/internal/pool github.com/go-redis/redis/internal/proto github.com/go-redis/redis/internal/util -# github.com/go-sql-driver/mysql v1.4.1 => github.com/go-sql-driver/mysql v0.0.0-20181218123637-c45f530f8e7f +# github.com/go-sql-driver/mysql v1.4.1 github.com/go-sql-driver/mysql # github.com/go-xorm/xorm v0.7.3-0.20190620151208-f1b4f8368459 github.com/go-xorm/xorm From a08b8517bdea46e1af52840929bf058fc5a89fc1 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sat, 22 Jun 2019 15:17:35 +0800 Subject: [PATCH 3/5] fix fmt --- models/issue.go | 2 +- models/issue_comment.go | 2 +- models/issue_reaction.go | 2 +- models/issue_tracked_time.go | 2 +- models/login_source.go | 2 +- models/migrations/v31.go | 2 +- models/migrations/v38.go | 2 +- models/migrations/v75.go | 2 +- models/migrations/v78.go | 2 +- models/migrations/v85.go | 2 +- models/models.go | 2 +- models/org.go | 2 +- models/repo.go | 2 +- models/repo_unit.go | 2 +- models/review.go | 2 +- models/ssh_key.go | 2 +- models/unit_tests.go | 2 +- models/user.go | 4 ++-- 18 files changed, 19 insertions(+), 19 deletions(-) diff --git a/models/issue.go b/models/issue.go index 85544f38ee345..b5504beb71bcd 100644 --- a/models/issue.go +++ b/models/issue.go @@ -19,8 +19,8 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "xorm.io/builder" "github.com/go-xorm/xorm" + "xorm.io/builder" ) // Issue represents an issue or pull request of repository. diff --git a/models/issue_comment.go b/models/issue_comment.go index ec423c19d51c3..c9f1bd9d5f22d 100644 --- a/models/issue_comment.go +++ b/models/issue_comment.go @@ -15,8 +15,8 @@ import ( "code.gitea.io/gitea/modules/markup/markdown" "code.gitea.io/gitea/modules/setting" "github.com/Unknwon/com" - "xorm.io/builder" "github.com/go-xorm/xorm" + "xorm.io/builder" api "code.gitea.io/gitea/modules/structs" diff --git a/models/issue_reaction.go b/models/issue_reaction.go index c3f45b649f217..e0df6f757b5bf 100644 --- a/models/issue_reaction.go +++ b/models/issue_reaction.go @@ -11,8 +11,8 @@ import ( "code.gitea.io/gitea/modules/setting" "code.gitea.io/gitea/modules/util" - "xorm.io/builder" "github.com/go-xorm/xorm" + "xorm.io/builder" ) // Reaction represents a reactions on issues and comments. diff --git a/models/issue_tracked_time.go b/models/issue_tracked_time.go index 15c51fbfec2e9..5482a45f2a91f 100644 --- a/models/issue_tracked_time.go +++ b/models/issue_tracked_time.go @@ -10,8 +10,8 @@ import ( "code.gitea.io/gitea/modules/setting" api "code.gitea.io/gitea/modules/structs" - "xorm.io/builder" "github.com/go-xorm/xorm" + "xorm.io/builder" ) // TrackedTime represents a time that was spent for a specific issue. 
diff --git a/models/login_source.go b/models/login_source.go index c51e1c1fbee69..626c23277223d 100644 --- a/models/login_source.go +++ b/models/login_source.go @@ -15,8 +15,8 @@ import ( "strings" "github.com/Unknwon/com" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" "code.gitea.io/gitea/modules/auth/ldap" "code.gitea.io/gitea/modules/auth/oauth2" diff --git a/models/migrations/v31.go b/models/migrations/v31.go index 354d9ed0c18ff..d6cea4c51b0b5 100644 --- a/models/migrations/v31.go +++ b/models/migrations/v31.go @@ -8,8 +8,8 @@ import ( "fmt" "time" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" ) func addLoginSourceSyncEnabledColumn(x *xorm.Engine) error { diff --git a/models/migrations/v38.go b/models/migrations/v38.go index d75cf4ea6eec0..6060b70fe870f 100644 --- a/models/migrations/v38.go +++ b/models/migrations/v38.go @@ -9,8 +9,8 @@ import ( "code.gitea.io/gitea/models" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" ) func removeCommitsUnitType(x *xorm.Engine) (err error) { diff --git a/models/migrations/v75.go b/models/migrations/v75.go index 01e1b2a82e3a6..58d1d34c98634 100644 --- a/models/migrations/v75.go +++ b/models/migrations/v75.go @@ -5,8 +5,8 @@ package migrations import ( - "xorm.io/builder" "github.com/go-xorm/xorm" + "xorm.io/builder" ) func clearNonusedData(x *xorm.Engine) error { diff --git a/models/migrations/v78.go b/models/migrations/v78.go index 26474c878ff03..511a4f57fa87f 100644 --- a/models/migrations/v78.go +++ b/models/migrations/v78.go @@ -10,8 +10,8 @@ import ( "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/log" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" ) func renameRepoIsBareToIsEmpty(x *xorm.Engine) error { diff --git a/models/migrations/v85.go b/models/migrations/v85.go index a1640ed9ce4b6..b8d0ee5443d03 100644 --- a/models/migrations/v85.go +++ b/models/migrations/v85.go @@ -7,8 +7,8 @@ package migrations import ( "fmt" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" "code.gitea.io/gitea/models" "code.gitea.io/gitea/modules/generate" diff --git a/models/models.go b/models/models.go index 858723889624e..3b3d8ec30a1ae 100644 --- a/models/models.go +++ b/models/models.go @@ -20,8 +20,8 @@ import ( // Needed for the MySQL driver _ "github.com/go-sql-driver/mysql" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" // Needed for the Postgresql driver _ "github.com/lib/pq" diff --git a/models/org.go b/models/org.go index e0be575df0eb1..d86109de57e10 100644 --- a/models/org.go +++ b/models/org.go @@ -15,8 +15,8 @@ import ( "code.gitea.io/gitea/modules/structs" "github.com/Unknwon/com" - "xorm.io/builder" "github.com/go-xorm/xorm" + "xorm.io/builder" ) var ( diff --git a/models/repo.go b/models/repo.go index d0d010d77747a..2f732d0e2679d 100644 --- a/models/repo.go +++ b/models/repo.go @@ -37,9 +37,9 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "xorm.io/builder" "github.com/go-xorm/xorm" ini "gopkg.in/ini.v1" + "xorm.io/builder" ) var repoWorkingPool = sync.NewExclusivePool() diff --git a/models/repo_unit.go b/models/repo_unit.go index 9c5da32fce17f..80126270deea7 100644 --- a/models/repo_unit.go +++ b/models/repo_unit.go @@ -10,8 +10,8 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "xorm.io/core" "github.com/go-xorm/xorm" + "xorm.io/core" ) // RepoUnit describes all units of a repository diff --git a/models/review.go b/models/review.go 
index 17241f024f648..458d58152e283 100644 --- a/models/review.go +++ b/models/review.go @@ -11,9 +11,9 @@ import ( api "code.gitea.io/gitea/modules/structs" "code.gitea.io/gitea/modules/util" + "github.com/go-xorm/xorm" "xorm.io/builder" "xorm.io/core" - "github.com/go-xorm/xorm" ) // ReviewType defines the sort of feedback a review gives diff --git a/models/ssh_key.go b/models/ssh_key.go index 87c47b6db93bd..ceb4d9756075e 100644 --- a/models/ssh_key.go +++ b/models/ssh_key.go @@ -25,9 +25,9 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "xorm.io/builder" "github.com/go-xorm/xorm" "golang.org/x/crypto/ssh" + "xorm.io/builder" ) const ( diff --git a/models/unit_tests.go b/models/unit_tests.go index 01ad782f3bbf3..330dc5ee4e096 100644 --- a/models/unit_tests.go +++ b/models/unit_tests.go @@ -18,10 +18,10 @@ import ( "code.gitea.io/gitea/modules/setting" "github.com/Unknwon/com" - "xorm.io/core" "github.com/go-xorm/xorm" "github.com/stretchr/testify/assert" "gopkg.in/testfixtures.v2" + "xorm.io/core" ) // NonexistentID an ID that will never exist diff --git a/models/user.go b/models/user.go index 9723af125a1d5..aa392b1ea9bc9 100644 --- a/models/user.go +++ b/models/user.go @@ -32,11 +32,11 @@ import ( "code.gitea.io/gitea/modules/util" "github.com/Unknwon/com" - "xorm.io/builder" - "xorm.io/core" "github.com/go-xorm/xorm" "golang.org/x/crypto/pbkdf2" "golang.org/x/crypto/ssh" + "xorm.io/builder" + "xorm.io/core" ) // UserType defines the user type From 5f6ebcfa95c7df438614fba12dc8b34a76505218 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 23 Jun 2019 09:18:12 +0800 Subject: [PATCH 4/5] fix Consistency --- models/consistency.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/models/consistency.go b/models/consistency.go index 0c279eaaf8528..f9fa3028fd9c8 100644 --- a/models/consistency.go +++ b/models/consistency.go @@ -39,7 +39,7 @@ func CheckConsistencyFor(t *testing.T, beansToCheck ...interface{}) { ptrToSliceValue := reflect.New(sliceType) ptrToSliceValue.Elem().Set(sliceValue) - assert.NoError(t, x.Where(bean).Find(ptrToSliceValue.Interface())) + assert.NoError(t, x.Table(bean).Find(ptrToSliceValue.Interface())) sliceValue = ptrToSliceValue.Elem() for i := 0; i < sliceValue.Len(); i++ { From b7b2d67e72e745d03dd859e8e43bb2bbcb7849f1 Mon Sep 17 00:00:00 2001 From: Lunny Xiao Date: Sun, 23 Jun 2019 18:34:47 +0800 Subject: [PATCH 5/5] fix tests --- go.mod | 2 + go.sum | 4 +- vendor/cloud.google.com/go/AUTHORS | 15 - vendor/cloud.google.com/go/CONTRIBUTORS | 40 - vendor/cloud.google.com/go/LICENSE | 202 ---- vendor/cloud.google.com/go/civil/civil.go | 277 ------ .../denisenkom/go-mssqldb/README.md | 247 +---- .../denisenkom/go-mssqldb/appveyor.yml | 48 - .../github.com/denisenkom/go-mssqldb/buf.go | 154 ++-- .../denisenkom/go-mssqldb/bulkcopy.go | 554 ----------- .../denisenkom/go-mssqldb/bulkcopy_sql.go | 93 -- .../go-mssqldb/{internal/cp => }/charset.go | 8 +- .../denisenkom/go-mssqldb/collation.go | 39 + .../denisenkom/go-mssqldb/convert.go | 306 ------- .../go-mssqldb/{internal/cp => }/cp1250.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1251.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1252.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1253.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1254.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1255.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1256.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1257.go | 2 +- .../go-mssqldb/{internal/cp => }/cp1258.go | 
2 +- .../go-mssqldb/{internal/cp => }/cp437.go | 2 +- .../go-mssqldb/{internal/cp => }/cp850.go | 2 +- .../go-mssqldb/{internal/cp => }/cp874.go | 2 +- .../go-mssqldb/{internal/cp => }/cp932.go | 2 +- .../go-mssqldb/{internal/cp => }/cp936.go | 2 +- .../go-mssqldb/{internal/cp => }/cp949.go | 2 +- .../go-mssqldb/{internal/cp => }/cp950.go | 2 +- .../denisenkom/go-mssqldb/decimal.go | 22 +- .../github.com/denisenkom/go-mssqldb/doc.go | 14 - .../go-mssqldb/internal/cp/collation.go | 20 - .../github.com/denisenkom/go-mssqldb/log.go | 21 +- .../github.com/denisenkom/go-mssqldb/mssql.go | 863 ++++-------------- .../denisenkom/go-mssqldb/mssql_go1.3.go | 11 + .../denisenkom/go-mssqldb/mssql_go1.3pre.go | 11 + .../denisenkom/go-mssqldb/mssql_go110.go | 47 - .../denisenkom/go-mssqldb/mssql_go19.go | 171 ---- .../denisenkom/go-mssqldb/mssql_go19pre.go | 16 - .../github.com/denisenkom/go-mssqldb/net.go | 22 +- .../github.com/denisenkom/go-mssqldb/ntlm.go | 84 +- .../denisenkom/go-mssqldb/parser.go | 40 +- .../github.com/denisenkom/go-mssqldb/rpc.go | 47 +- .../denisenkom/go-mssqldb/sspi_windows.go | 2 +- .../github.com/denisenkom/go-mssqldb/tds.go | 407 ++------- .../github.com/denisenkom/go-mssqldb/token.go | 371 ++------ .../denisenkom/go-mssqldb/token_string.go | 53 -- .../github.com/denisenkom/go-mssqldb/tran.go | 27 +- .../github.com/denisenkom/go-mssqldb/types.go | 835 ++--------------- .../denisenkom/go-mssqldb/uniqueidentifier.go | 74 -- vendor/modules.txt | 5 +- 52 files changed, 646 insertions(+), 4538 deletions(-) delete mode 100644 vendor/cloud.google.com/go/AUTHORS delete mode 100644 vendor/cloud.google.com/go/CONTRIBUTORS delete mode 100644 vendor/cloud.google.com/go/LICENSE delete mode 100644 vendor/cloud.google.com/go/civil/civil.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/appveyor.yml delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/charset.go (94%) create mode 100644 vendor/github.com/denisenkom/go-mssqldb/collation.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/convert.go rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1250.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1251.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1252.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1253.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1254.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1255.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1256.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1257.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp1258.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp437.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp850.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp874.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp932.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp936.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp 
=> }/cp949.go (99%) rename vendor/github.com/denisenkom/go-mssqldb/{internal/cp => }/cp950.go (99%) delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/doc.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go create mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/token_string.go delete mode 100644 vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go diff --git a/go.mod b/go.mod index 933b48077df4f..6087e52d3e1f3 100644 --- a/go.mod +++ b/go.mod @@ -139,3 +139,5 @@ require ( xorm.io/builder v0.3.5 xorm.io/core v0.6.3 ) + +replace github.com/denisenkom/go-mssqldb => github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 diff --git a/go.sum b/go.sum index 14fac85b4c4d3..9d5e2c9c50e72 100644 --- a/go.sum +++ b/go.sum @@ -60,8 +60,8 @@ github.com/cznic/strutil v0.0.0-20181122101858-275e90344537/go.mod h1:AHHPPPXTw0 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 h1:b5OnbZD49x9g+/FcYbs/vukEt8C/jUbGhCJ3uduQmu8= -github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= +github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 h1:JpA+YMG4JLW8nzLmU05mTiuB0O17xHGxpWolEZ0zDuA= +github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc= github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac h1:xrQJVwQCGqDvOO7/0+RyIq5J2M3Q4ZF7Ug/BMQtML1E= github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712 h1:aaQcKT9WumO6JEJcRyTqFVq4XUZiUcKR2/GI31TOcz8= diff --git a/vendor/cloud.google.com/go/AUTHORS b/vendor/cloud.google.com/go/AUTHORS deleted file mode 100644 index c364af1da0953..0000000000000 --- a/vendor/cloud.google.com/go/AUTHORS +++ /dev/null @@ -1,15 +0,0 @@ -# This is the official list of cloud authors for copyright purposes. -# This file is distinct from the CONTRIBUTORS files. -# See the latter for an explanation. - -# Names should be added to this file as: -# Name or Organization -# The email address is not required for organizations. - -Filippo Valsorda -Google Inc. -Ingo Oeser -Palm Stone Games, Inc. -Paweł Knap -Péter Szilágyi -Tyler Treat diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS deleted file mode 100644 index 3b3cbed98e9a9..0000000000000 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ /dev/null @@ -1,40 +0,0 @@ -# People who have agreed to one of the CLAs and can contribute patches. -# The AUTHORS file lists the copyright holders; this file -# lists people. For example, Google employees are listed here -# but not in AUTHORS, because Google holds the copyright. 
-# -# https://developers.google.com/open-source/cla/individual -# https://developers.google.com/open-source/cla/corporate -# -# Names should be added to this file as: -# Name - -# Keep the list alphabetically sorted. - -Alexis Hunt -Andreas Litt -Andrew Gerrand -Brad Fitzpatrick -Burcu Dogan -Dave Day -David Sansome -David Symonds -Filippo Valsorda -Glenn Lewis -Ingo Oeser -James Hall -Johan Euphrosine -Jonathan Amsterdam -Kunpei Sakai -Luna Duclos -Magnus Hiie -Mario Castro -Michael McGreevy -Omar Jarjur -Paweł Knap -Péter Szilágyi -Sarah Adams -Thanatat Tamtan -Toby Burress -Tuo Shan -Tyler Treat diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE deleted file mode 100644 index d645695673349..0000000000000 --- a/vendor/cloud.google.com/go/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/cloud.google.com/go/civil/civil.go b/vendor/cloud.google.com/go/civil/civil.go deleted file mode 100644 index 29272ef26a313..0000000000000 --- a/vendor/cloud.google.com/go/civil/civil.go +++ /dev/null @@ -1,277 +0,0 @@ -// Copyright 2016 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package civil implements types for civil time, a time-zone-independent -// representation of time that follows the rules of the proleptic -// Gregorian calendar with exactly 24-hour days, 60-minute hours, and 60-second -// minutes. -// -// Because they lack location information, these types do not represent unique -// moments or intervals of time. Use time.Time for that purpose. -package civil - -import ( - "fmt" - "time" -) - -// A Date represents a date (year, month, day). -// -// This type does not include location information, and therefore does not -// describe a unique 24-hour timespan. -type Date struct { - Year int // Year (e.g., 2014). - Month time.Month // Month of the year (January = 1, ...). - Day int // Day of the month, starting at 1. -} - -// DateOf returns the Date in which a time occurs in that time's location. -func DateOf(t time.Time) Date { - var d Date - d.Year, d.Month, d.Day = t.Date() - return d -} - -// ParseDate parses a string in RFC3339 full-date format and returns the date value it represents. -func ParseDate(s string) (Date, error) { - t, err := time.Parse("2006-01-02", s) - if err != nil { - return Date{}, err - } - return DateOf(t), nil -} - -// String returns the date in RFC3339 full-date format. -func (d Date) String() string { - return fmt.Sprintf("%04d-%02d-%02d", d.Year, d.Month, d.Day) -} - -// IsValid reports whether the date is valid. 
-func (d Date) IsValid() bool { - return DateOf(d.In(time.UTC)) == d -} - -// In returns the time corresponding to time 00:00:00 of the date in the location. -// -// In is always consistent with time.Date, even when time.Date returns a time -// on a different day. For example, if loc is America/Indiana/Vincennes, then both -// time.Date(1955, time.May, 1, 0, 0, 0, 0, loc) -// and -// civil.Date{Year: 1955, Month: time.May, Day: 1}.In(loc) -// return 23:00:00 on April 30, 1955. -// -// In panics if loc is nil. -func (d Date) In(loc *time.Location) time.Time { - return time.Date(d.Year, d.Month, d.Day, 0, 0, 0, 0, loc) -} - -// AddDays returns the date that is n days in the future. -// n can also be negative to go into the past. -func (d Date) AddDays(n int) Date { - return DateOf(d.In(time.UTC).AddDate(0, 0, n)) -} - -// DaysSince returns the signed number of days between the date and s, not including the end day. -// This is the inverse operation to AddDays. -func (d Date) DaysSince(s Date) (days int) { - // We convert to Unix time so we do not have to worry about leap seconds: - // Unix time increases by exactly 86400 seconds per day. - deltaUnix := d.In(time.UTC).Unix() - s.In(time.UTC).Unix() - return int(deltaUnix / 86400) -} - -// Before reports whether d1 occurs before d2. -func (d1 Date) Before(d2 Date) bool { - if d1.Year != d2.Year { - return d1.Year < d2.Year - } - if d1.Month != d2.Month { - return d1.Month < d2.Month - } - return d1.Day < d2.Day -} - -// After reports whether d1 occurs after d2. -func (d1 Date) After(d2 Date) bool { - return d2.Before(d1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of d.String(). -func (d Date) MarshalText() ([]byte, error) { - return []byte(d.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The date is expected to be a string in a format accepted by ParseDate. -func (d *Date) UnmarshalText(data []byte) error { - var err error - *d, err = ParseDate(string(data)) - return err -} - -// A Time represents a time with nanosecond precision. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -// -// This type exists to represent the TIME type in storage-based APIs like BigQuery. -// Most operations on Times are unlikely to be meaningful. Prefer the DateTime type. -type Time struct { - Hour int // The hour of the day in 24-hour format; range [0-23] - Minute int // The minute of the hour; range [0-59] - Second int // The second of the minute; range [0-59] - Nanosecond int // The nanosecond of the second; range [0-999999999] -} - -// TimeOf returns the Time representing the time of day in which a time occurs -// in that time's location. It ignores the date. -func TimeOf(t time.Time) Time { - var tm Time - tm.Hour, tm.Minute, tm.Second = t.Clock() - tm.Nanosecond = t.Nanosecond() - return tm -} - -// ParseTime parses a string and returns the time value it represents. -// ParseTime accepts an extended form of the RFC3339 partial-time format. After -// the HH:MM:SS part of the string, an optional fractional part may appear, -// consisting of a decimal point followed by one to nine decimal digits. -// (RFC3339 admits only one digit after the decimal point). -func ParseTime(s string) (Time, error) { - t, err := time.Parse("15:04:05.999999999", s) - if err != nil { - return Time{}, err - } - return TimeOf(t), nil -} - -// String returns the date in the format described in ParseTime. 
If Nanoseconds -// is zero, no fractional part will be generated. Otherwise, the result will -// end with a fractional part consisting of a decimal point and nine digits. -func (t Time) String() string { - s := fmt.Sprintf("%02d:%02d:%02d", t.Hour, t.Minute, t.Second) - if t.Nanosecond == 0 { - return s - } - return s + fmt.Sprintf(".%09d", t.Nanosecond) -} - -// IsValid reports whether the time is valid. -func (t Time) IsValid() bool { - // Construct a non-zero time. - tm := time.Date(2, 2, 2, t.Hour, t.Minute, t.Second, t.Nanosecond, time.UTC) - return TimeOf(tm) == t -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of t.String(). -func (t Time) MarshalText() ([]byte, error) { - return []byte(t.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The time is expected to be a string in a format accepted by ParseTime. -func (t *Time) UnmarshalText(data []byte) error { - var err error - *t, err = ParseTime(string(data)) - return err -} - -// A DateTime represents a date and time. -// -// This type does not include location information, and therefore does not -// describe a unique moment in time. -type DateTime struct { - Date Date - Time Time -} - -// Note: We deliberately do not embed Date into DateTime, to avoid promoting AddDays and Sub. - -// DateTimeOf returns the DateTime in which a time occurs in that time's location. -func DateTimeOf(t time.Time) DateTime { - return DateTime{ - Date: DateOf(t), - Time: TimeOf(t), - } -} - -// ParseDateTime parses a string and returns the DateTime it represents. -// ParseDateTime accepts a variant of the RFC3339 date-time format that omits -// the time offset but includes an optional fractional time, as described in -// ParseTime. Informally, the accepted format is -// YYYY-MM-DDTHH:MM:SS[.FFFFFFFFF] -// where the 'T' may be a lower-case 't'. -func ParseDateTime(s string) (DateTime, error) { - t, err := time.Parse("2006-01-02T15:04:05.999999999", s) - if err != nil { - t, err = time.Parse("2006-01-02t15:04:05.999999999", s) - if err != nil { - return DateTime{}, err - } - } - return DateTimeOf(t), nil -} - -// String returns the date in the format described in ParseDate. -func (dt DateTime) String() string { - return dt.Date.String() + "T" + dt.Time.String() -} - -// IsValid reports whether the datetime is valid. -func (dt DateTime) IsValid() bool { - return dt.Date.IsValid() && dt.Time.IsValid() -} - -// In returns the time corresponding to the DateTime in the given location. -// -// If the time is missing or ambigous at the location, In returns the same -// result as time.Date. For example, if loc is America/Indiana/Vincennes, then -// both -// time.Date(1955, time.May, 1, 0, 30, 0, 0, loc) -// and -// civil.DateTime{ -// civil.Date{Year: 1955, Month: time.May, Day: 1}}, -// civil.Time{Minute: 30}}.In(loc) -// return 23:30:00 on April 30, 1955. -// -// In panics if loc is nil. -func (dt DateTime) In(loc *time.Location) time.Time { - return time.Date(dt.Date.Year, dt.Date.Month, dt.Date.Day, dt.Time.Hour, dt.Time.Minute, dt.Time.Second, dt.Time.Nanosecond, loc) -} - -// Before reports whether dt1 occurs before dt2. -func (dt1 DateTime) Before(dt2 DateTime) bool { - return dt1.In(time.UTC).Before(dt2.In(time.UTC)) -} - -// After reports whether dt1 occurs after dt2. -func (dt1 DateTime) After(dt2 DateTime) bool { - return dt2.Before(dt1) -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The output is the result of dt.String(). 
-func (dt DateTime) MarshalText() ([]byte, error) { - return []byte(dt.String()), nil -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The datetime is expected to be a string in a format accepted by ParseDateTime -func (dt *DateTime) UnmarshalText(data []byte) error { - var err error - *dt, err = ParseDateTime(string(data)) - return err -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/README.md b/vendor/github.com/denisenkom/go-mssqldb/README.md index e1a059d8855fe..8570ae9f615d5 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/README.md +++ b/vendor/github.com/denisenkom/go-mssqldb/README.md @@ -1,209 +1,78 @@ # A pure Go MSSQL driver for Go's database/sql package -[![GoDoc](https://godoc.org/github.com/denisenkom/go-mssqldb?status.svg)](http://godoc.org/github.com/denisenkom/go-mssqldb) -[![Build status](https://ci.appveyor.com/api/projects/status/jrln8cs62wj9i0a2?svg=true)](https://ci.appveyor.com/project/denisenkom/go-mssqldb) -[![codecov](https://codecov.io/gh/denisenkom/go-mssqldb/branch/master/graph/badge.svg)](https://codecov.io/gh/denisenkom/go-mssqldb) - ## Install -Requires Go 1.8 or above. - -Install with `go get github.com/denisenkom/go-mssqldb` . - -## Connection Parameters and DSN - -The recommended connection string uses a URL format: -`sqlserver://username:password@host/instance?param1=value¶m2=value` -Other supported formats are listed below. + go get github.com/denisenkom/go-mssqldb -### Common parameters: - -* `user id` - enter the SQL Server Authentication user id or the Windows Authentication user id in the DOMAIN\User format. On Windows, if user id is empty or missing Single-Sign-On is used. -* `password` -* `database` -* `connection timeout` - in seconds (default is 0 for no timeout), set to 0 for no timeout. Recommended to set to 0 and use context to manage query and connection timeouts. -* `dial timeout` - in seconds (default is 15), set to 0 for no timeout -* `encrypt` - * `disable` - Data send between client and server is not encrypted. - * `false` - Data sent between client and server is not encrypted beyond the login packet. (Default) - * `true` - Data sent between client and server is encrypted. -* `app name` - The application name (default is go-mssqldb) - -### Connection parameters for ODBC and ADO style connection strings: +## Tests -* `server` - host or host\instance (default localhost) -* `port` - used only when there is no instance in server (default 1433) +`go test` is used for testing. A running instance of MSSQL server is required. +Environment variables are used to pass login information. -### Less common parameters: +Example: -* `keepAlive` - in seconds; 0 to disable (default is 30) -* `failoverpartner` - host or host\instance (default is no partner). 
-* `failoverport` - used only when there is no instance in failoverpartner (default 1433) -* `packet size` - in bytes; 512 to 32767 (default is 4096) - * Encrypted connections have a maximum packet size of 16383 bytes - * Further information on usage: https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-network-packet-size-server-configuration-option -* `log` - logging flags (default 0/no logging, 63 for full logging) + env HOST=localhost SQLUSER=sa SQLPASSWORD=sa DATABASE=test go test + +## Connection Parameters + +* "server" - host or host\instance (default localhost) +* "port" - used only when there is no instance in server (default 1433) +* "failoverpartner" - host or host\instance (default is no partner). +* "failoverport" - used only when there is no instance in failoverpartner (default 1433) +* "user id" - enter the SQL Server Authentication user id or the Windows Authentication user id in the DOMAIN\User format. On Windows, if user id is empty or missing Single-Sign-On is used. +* "password" +* "database" +* "connection timeout" - in seconds (default is 30) +* "dial timeout" - in seconds (default is 5) +* "keepAlive" - in seconds; 0 to disable (default is 0) +* "log" - logging flags (default 0/no logging, 63 for full logging) * 1 log errors * 2 log messages * 4 log rows affected * 8 trace sql statements * 16 log statement parameters * 32 log transaction begin/end -* `TrustServerCertificate` +* "encrypt" + * disable - Data send between client and server is not encrypted. + * false - Data sent between client and server is not encrypted beyond the login packet. (Default) + * true - Data sent between client and server is encrypted. +* "TrustServerCertificate" * false - Server certificate is checked. Default is false if encypt is specified. * true - Server certificate is not checked. Default is true if encrypt is not specified. If trust server certificate is true, driver accepts any certificate presented by the server and any host name in that certificate. In this mode, TLS is susceptible to man-in-the-middle attacks. This should be used only for testing. -* `certificate` - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates. -* `hostNameInCertificate` - Specifies the Common Name (CN) in the server certificate. Default value is the server host. -* `ServerSPN` - The kerberos SPN (Service Principal Name) for the server. Default is MSSQLSvc/host:port. -* `Workstation ID` - The workstation name (default is the host name) -* `ApplicationIntent` - Can be given the value `ReadOnly` to initiate a read-only connection to an Availability Group listener. - -### The connection string can be specified in one of three formats: - - -1. URL: with `sqlserver` scheme. username and password appears before the host. Any instance appears as - the first segment in the path. All other options are query parameters. Examples: - - * `sqlserver://username:password@host/instance?param1=value¶m2=value` - * `sqlserver://username:password@host:port?param1=value¶m2=value` - * `sqlserver://sa@localhost/SQLExpress?database=master&connection+timeout=30` // `SQLExpress instance. - * `sqlserver://sa:mypass@localhost?database=master&connection+timeout=30` // username=sa, password=mypass. - * `sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30` // port 1234 on localhost. 
- * `sqlserver://sa:my%7Bpass@somehost?connection+timeout=30` // password is "my{pass" +* "certificate" - The file that contains the public key certificate of the CA that signed the SQL Server certificate. The specified certificate overrides the go platform specific CA certificates. +* "hostNameInCertificate" - Specifies the Common Name (CN) in the server certificate. Default value is the server host. +* "ServerSPN" - The kerberos SPN (Service Principal Name) for the server. Default is MSSQLSvc/host:port. +* "Workstation ID" - The workstation name (default is the host name) +* "app name" - The application name (default is go-mssqldb) +* "ApplicationIntent" - Can be given the value "ReadOnly" to initiate a read-only connection to an Availability Group listener. - A string of this format can be constructed using the `URL` type in the `net/url` package. - -```go - query := url.Values{} - query.Add("app name", "MyAppName") - - u := &url.URL{ - Scheme: "sqlserver", - User: url.UserPassword(username, password), - Host: fmt.Sprintf("%s:%d", hostname, port), - // Path: instance, // if connecting to an instance instead of a port - RawQuery: query.Encode(), - } - db, err := sql.Open("sqlserver", u.String()) -``` +Example: -2. ADO: `key=value` pairs separated by `;`. Values may not contain `;`, leading and trailing whitespace is ignored. - Examples: - - * `server=localhost\\SQLExpress;user id=sa;database=master;app name=MyAppName` - * `server=localhost;user id=sa;database=master;app name=MyAppName` - -3. ODBC: Prefix with `odbc`, `key=value` pairs separated by `;`. Allow `;` by wrapping - values in `{}`. Examples: - - * `odbc:server=localhost\\SQLExpress;user id=sa;database=master;app name=MyAppName` - * `odbc:server=localhost;user id=sa;database=master;app name=MyAppName` - * `odbc:server=localhost;user id=sa;password={foo;bar}` // Value marked with `{}`, password is "foo;bar" - * `odbc:server=localhost;user id=sa;password={foo{bar}` // Value marked with `{}`, password is "foo{bar" - * `odbc:server=localhost;user id=sa;password={foobar }` // Value marked with `{}`, password is "foobar " - * `odbc:server=localhost;user id=sa;password=foo{bar` // Literal `{`, password is "foo{bar" - * `odbc:server=localhost;user id=sa;password=foo}bar` // Literal `}`, password is "foo}bar" - * `odbc:server=localhost;user id=sa;password={foo{bar}` // Literal `{`, password is "foo{bar" - * `odbc:server=localhost;user id=sa;password={foo}}bar}` // Escaped `} with `}}`, password is "foo}bar" - -## Executing Stored Procedures - -To run a stored procedure, set the query text to the procedure name: ```go -var account = "abc" -_, err := db.ExecContext(ctx, "sp_RunMe", - sql.Named("ID", 123), - sql.Named("Account", sql.Out{Dest: &account}), -) + db, err := sql.Open("mssql", "server=localhost;user id=sa") ``` -## Caveat for local temporary tables +## Statement Parameters -Due to protocol limitations, temporary tables will only be allocated on the connection -as a result of executing a query with zero parameters. The following query -will, due to the use of a parameter, execute in its own session, -and `#mytemp` will be de-allocated right away: +In the SQL statement text, literals may be replaced by a parameter that matches one of the following: -```go -conn, err := pool.Conn(ctx) -defer conn.Close() -_, err := conn.ExecContext(ctx, "select @p1 as x into #mytemp", 1) -// at this point #mytemp is already dropped again as the session of the ExecContext is over -``` +* ? 
+* ?nnn +* :nnn +* $nnn -To work around this, always explicitly create the local temporary -table in a query without any parameters. As a special case, the driver -will then be able to execute the query directly on the -connection-scoped session. The following example works: +where nnn represents an integer that specifies a 1-indexed positional parameter. Ex: ```go -conn, err := pool.Conn(ctx) - -// Set us up so that temp table is always cleaned up, since conn.Close() -// merely returns conn to pool, rather than actually closing the connection. -defer func() { - _, _ = conn.ExecContext(ctx, "drop table #mytemp") // always clean up - conn.Close() // merely returns conn to pool -}() - - -// Since we not pass any parameters below, the query will execute on the scope of -// the connection and succeed in creating the table. -_, err := conn.ExecContext(ctx, "create table #mytemp ( x int )") - -// #mytemp is now available even if you pass parameters -_, err := conn.ExecContext(ctx, "insert into #mytemp (x) values (@p1)", 1) - +db.Query("SELECT * FROM t WHERE a = ?3, b = ?2, c = ?1", "x", "y", "z") ``` -## Return Status - -To get the procedure return status, pass into the parameters a -`*mssql.ReturnStatus`. For example: -``` -var rs mssql.ReturnStatus -_, err := db.ExecContext(ctx, "theproc", &rs) -log.Printf("status=%d", rs) -``` - -## Parameters - -The `sqlserver` driver uses normal MS SQL Server syntax and expects parameters in -the sql query to be in the form of either `@Name` or `@p1` to `@pN` (ordinal position). +will expand to roughly -```go -db.QueryContext(ctx, `select * from t where ID = @ID and Name = @p2;`, sql.Named("ID", 6), "Bob") +```sql +SELECT * FROM t WHERE a = 'z', b = 'y', c = 'x' ``` -### Parameter Types - -To pass specific types to the query parameters, say `varchar` or `date` types, -you must convert the types to the type before passing in. The following types -are supported: - - * string -> nvarchar - * mssql.VarChar -> varchar - * time.Time -> datetimeoffset or datetime (TDS version dependent) - * mssql.DateTime1 -> datetime - * mssql.DateTimeOffset -> datetimeoffset - * "cloud.google.com/go/civil".Date -> date - * "cloud.google.com/go/civil".DateTime -> datetime2 - * "cloud.google.com/go/civil".Time -> time - -## Important Notes - - * [LastInsertId](https://golang.org/pkg/database/sql/#Result.LastInsertId) should - not be used with this driver (or SQL Server) due to how the TDS protocol - works. Please use the [OUTPUT Clause](https://docs.microsoft.com/en-us/sql/t-sql/queries/output-clause-transact-sql) - or add a `select ID = convert(bigint, SCOPE_IDENTITY());` to the end of your - query (ref [SCOPE_IDENTITY](https://docs.microsoft.com/en-us/sql/t-sql/functions/scope-identity-transact-sql)). - This will ensure you are getting the correct ID and will prevent a network round trip. - * [NewConnector](https://godoc.org/github.com/denisenkom/go-mssqldb#NewConnector) - may be used with [OpenDB](https://golang.org/pkg/database/sql/#OpenDB). - * [Connector.SessionInitSQL](https://godoc.org/github.com/denisenkom/go-mssqldb#Connector.SessionInitSQL) - may be set to set any driver specific session settings after the session - has been reset. If empty the session will still be reset but use the database - defaults in Go1.10+. ## Features @@ -218,34 +87,6 @@ are supported: * Supports connections to AlwaysOn Availability Group listeners, including re-direction to read-only replicas. * Supports query notifications -## Tests - -`go test` is used for testing. 
A running instance of MSSQL server is required. -Environment variables are used to pass login information. - -Example: - - env SQLSERVER_DSN=sqlserver://user:pass@hostname/instance?database=test1 go test - -## Deprecated - -These features still exist in the driver, but they are are deprecated. - -### Query Parameter Token Replace (driver "mssql") - -If you use the driver name "mssql" (rather then "sqlserver") the SQL text -will be loosly parsed and an attempt to extract identifiers using one of - -* ? -* ?nnn -* :nnn -* $nnn - -will be used. This is not recommended with SQL Server. -There is at least one existing `won't fix` issue with the query parsing. - -Use the native "@Name" parameters instead with the "sqlserver" driver name. - ## Known Issues * SQL Server 2008 and 2008 R2 engine cannot handle login records when SSL encryption is not disabled. diff --git a/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml b/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml deleted file mode 100644 index 2ae5456d5cb70..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/appveyor.yml +++ /dev/null @@ -1,48 +0,0 @@ -version: 1.0.{build} - -os: Windows Server 2012 R2 - -clone_folder: c:\gopath\src\github.com\denisenkom\go-mssqldb - -environment: - GOPATH: c:\gopath - HOST: localhost - SQLUSER: sa - SQLPASSWORD: Password12! - DATABASE: test - GOVERSION: 110 - matrix: - - GOVERSION: 18 - SQLINSTANCE: SQL2016 - - GOVERSION: 19 - SQLINSTANCE: SQL2016 - - GOVERSION: 110 - SQLINSTANCE: SQL2016 - - SQLINSTANCE: SQL2014 - - SQLINSTANCE: SQL2012SP1 - - SQLINSTANCE: SQL2008R2SP2 - -install: - - set GOROOT=c:\go%GOVERSION% - - set PATH=%GOPATH%\bin;%GOROOT%\bin;%PATH% - - go version - - go env - - go get -u cloud.google.com/go/civil - -build_script: - - go build - -before_test: - # setup SQL Server - - ps: | - $instanceName = $env:SQLINSTANCE - Start-Service "MSSQL`$$instanceName" - Start-Service "SQLBrowser" - - sqlcmd -S "(local)\%SQLINSTANCE%" -Q "Use [master]; CREATE DATABASE test;" - - sqlcmd -S "(local)\%SQLINSTANCE%" -h -1 -Q "set nocount on; Select @@version" - - pip install codecov - - -test_script: - - go test -race -cpu 4 -coverprofile=coverage.txt -covermode=atomic - - codecov -f coverage.txt diff --git a/vendor/github.com/denisenkom/go-mssqldb/buf.go b/vendor/github.com/denisenkom/go-mssqldb/buf.go index 927d75d1b78b4..42e8ae345cf48 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/buf.go +++ b/vendor/github.com/denisenkom/go-mssqldb/buf.go @@ -2,14 +2,12 @@ package mssql import ( "encoding/binary" - "errors" "io" + "errors" ) -type packetType uint8 - type header struct { - PacketType packetType + PacketType uint8 Status uint8 Size uint16 Spid uint16 @@ -17,159 +15,125 @@ type header struct { Pad uint8 } -// tdsBuffer reads and writes TDS packets of data to the transport. -// The write and read buffers are separate to make sending attn signals -// possible without locks. Currently attn signals are only sent during -// reads, not writes. type tdsBuffer struct { - transport io.ReadWriteCloser - - packetSize int - - // Write fields. - wbuf []byte - wpos int - wPacketSeq byte - wPacketType packetType - - // Read fields. - rbuf []byte - rpos int - rsize int + buf []byte + pos uint16 + transport io.ReadWriteCloser + size uint16 final bool - rPacketType packetType - - // afterFirst is assigned to right after tdsBuffer is created and - // before the first use. It is executed after the first packet is - // written and then removed. 
- afterFirst func() + packet_type uint8 + afterFirst func() } -func newTdsBuffer(bufsize uint16, transport io.ReadWriteCloser) *tdsBuffer { - return &tdsBuffer{ - packetSize: int(bufsize), - wbuf: make([]byte, 1<<16), - rbuf: make([]byte, 1<<16), - rpos: 8, - transport: transport, - } -} - -func (rw *tdsBuffer) ResizeBuffer(packetSize int) { - rw.packetSize = packetSize -} - -func (w *tdsBuffer) PackageSize() int { - return w.packetSize +func newTdsBuffer(bufsize int, transport io.ReadWriteCloser) *tdsBuffer { + buf := make([]byte, bufsize) + w := new(tdsBuffer) + w.buf = buf + w.pos = 8 + w.transport = transport + w.size = 0 + return w } func (w *tdsBuffer) flush() (err error) { - // Write packet size. - w.wbuf[0] = byte(w.wPacketType) - binary.BigEndian.PutUint16(w.wbuf[2:], uint16(w.wpos)) - w.wbuf[6] = w.wPacketSeq + // writing packet size + binary.BigEndian.PutUint16(w.buf[2:], w.pos) - // Write packet into underlying transport. - if _, err = w.transport.Write(w.wbuf[:w.wpos]); err != nil { + // writing packet into underlying transport + if _, err = w.transport.Write(w.buf[:w.pos]); err != nil { return err } - // It is possible to create a whole new buffer after a flush. - // Useful for debugging. Normally reuse the buffer. - // w.wbuf = make([]byte, 1<<16) - // Execute afterFirst hook if it is set. + // execute afterFirst hook if it is set if w.afterFirst != nil { w.afterFirst() w.afterFirst = nil } - w.wpos = 8 - w.wPacketSeq++ + w.pos = 8 + // packet number + w.buf[6] += 1 return nil } func (w *tdsBuffer) Write(p []byte) (total int, err error) { + total = 0 for { - copied := copy(w.wbuf[w.wpos:w.packetSize], p) - w.wpos += copied + copied := copy(w.buf[w.pos:], p) + w.pos += uint16(copied) total += copied if copied == len(p) { - return + break } if err = w.flush(); err != nil { return } p = p[copied:] } + return } func (w *tdsBuffer) WriteByte(b byte) error { - if int(w.wpos) == len(w.wbuf) || w.wpos == w.packetSize { + if int(w.pos) == len(w.buf) { if err := w.flush(); err != nil { return err } } - w.wbuf[w.wpos] = b - w.wpos += 1 + w.buf[w.pos] = b + w.pos += 1 return nil } -func (w *tdsBuffer) BeginPacket(packetType packetType, resetSession bool) { - status := byte(0) - if resetSession { - switch packetType { - // Reset session can only be set on the following packet types. - case packSQLBatch, packRPCRequest, packTransMgrReq: - status = 0x8 - } - } - w.wbuf[1] = status // Packet is incomplete. This byte is set again in FinishPacket. - w.wpos = 8 - w.wPacketSeq = 1 - w.wPacketType = packetType +func (w *tdsBuffer) BeginPacket(packet_type byte) { + w.buf[0] = packet_type + w.buf[1] = 0 // packet is incomplete + w.buf[4] = 0 // spid + w.buf[5] = 0 + w.buf[6] = 1 // packet id + w.buf[7] = 0 // window + w.pos = 8 } func (w *tdsBuffer) FinishPacket() error { - w.wbuf[1] |= 1 // Mark this as the last packet in the message. 
+ w.buf[1] = 1 // this is last packet return w.flush() } -var headerSize = binary.Size(header{}) - func (r *tdsBuffer) readNextPacket() error { - h := header{} + header := header{} var err error - err = binary.Read(r.transport, binary.BigEndian, &h) + err = binary.Read(r.transport, binary.BigEndian, &header) if err != nil { return err } - if int(h.Size) > r.packetSize { + offset := uint16(binary.Size(header)) + if int(header.Size) > len(r.buf) { return errors.New("Invalid packet size, it is longer than buffer size") } - if headerSize > int(h.Size) { + if int(offset) > int(header.Size) { return errors.New("Invalid packet size, it is shorter than header size") } - _, err = io.ReadFull(r.transport, r.rbuf[headerSize:h.Size]) + _, err = io.ReadFull(r.transport, r.buf[offset:header.Size]) if err != nil { return err } - r.rpos = headerSize - r.rsize = int(h.Size) - r.final = h.Status != 0 - r.rPacketType = h.PacketType + r.pos = offset + r.size = header.Size + r.final = header.Status != 0 + r.packet_type = header.PacketType return nil } -func (r *tdsBuffer) BeginRead() (packetType, error) { +func (r *tdsBuffer) BeginRead() (uint8, error) { err := r.readNextPacket() if err != nil { return 0, err } - return r.rPacketType, nil + return r.packet_type, nil } func (r *tdsBuffer) ReadByte() (res byte, err error) { - if r.rpos == r.rsize { + if r.pos == r.size { if r.final { return 0, io.EOF } @@ -178,8 +142,8 @@ func (r *tdsBuffer) ReadByte() (res byte, err error) { return 0, err } } - res = r.rbuf[r.rpos] - r.rpos++ + res = r.buf[r.pos] + r.pos++ return res, nil } @@ -243,7 +207,7 @@ func (r *tdsBuffer) readUcs2(numchars int) string { func (r *tdsBuffer) Read(buf []byte) (copied int, err error) { copied = 0 err = nil - if r.rpos == r.rsize { + if r.pos == r.size { if r.final { return 0, io.EOF } @@ -252,7 +216,7 @@ func (r *tdsBuffer) Read(buf []byte) (copied int, err error) { return } } - copied = copy(buf, r.rbuf[r.rpos:r.rsize]) - r.rpos += copied + copied = copy(buf, r.buf[r.pos:r.size]) + r.pos += uint16(copied) return } diff --git a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go deleted file mode 100644 index 3b319af893fd3..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy.go +++ /dev/null @@ -1,554 +0,0 @@ -package mssql - -import ( - "bytes" - "context" - "encoding/binary" - "fmt" - "math" - "reflect" - "strconv" - "strings" - "time" -) - -type Bulk struct { - // ctx is used only for AddRow and Done methods. - // This could be removed if AddRow and Done accepted - // a ctx field as well, which is available with the - // database/sql call. 
- ctx context.Context - - cn *Conn - metadata []columnStruct - bulkColumns []columnStruct - columnsName []string - tablename string - numRows int - - headerSent bool - Options BulkOptions - Debug bool -} -type BulkOptions struct { - CheckConstraints bool - FireTriggers bool - KeepNulls bool - KilobytesPerBatch int - RowsPerBatch int - Order []string - Tablock bool -} - -type DataValue interface{} - -func (cn *Conn) CreateBulk(table string, columns []string) (_ *Bulk) { - b := Bulk{ctx: context.Background(), cn: cn, tablename: table, headerSent: false, columnsName: columns} - b.Debug = false - return &b -} - -func (cn *Conn) CreateBulkContext(ctx context.Context, table string, columns []string) (_ *Bulk) { - b := Bulk{ctx: ctx, cn: cn, tablename: table, headerSent: false, columnsName: columns} - b.Debug = false - return &b -} - -func (b *Bulk) sendBulkCommand(ctx context.Context) (err error) { - //get table columns info - err = b.getMetadata(ctx) - if err != nil { - return err - } - - //match the columns - for _, colname := range b.columnsName { - var bulkCol *columnStruct - - for _, m := range b.metadata { - if m.ColName == colname { - bulkCol = &m - break - } - } - if bulkCol != nil { - - if bulkCol.ti.TypeId == typeUdt { - //send udt as binary - bulkCol.ti.TypeId = typeBigVarBin - } - b.bulkColumns = append(b.bulkColumns, *bulkCol) - b.dlogf("Adding column %s %s %#x", colname, bulkCol.ColName, bulkCol.ti.TypeId) - } else { - return fmt.Errorf("Column %s does not exist in destination table %s", colname, b.tablename) - } - } - - //create the bulk command - - //columns definitions - var col_defs bytes.Buffer - for i, col := range b.bulkColumns { - if i != 0 { - col_defs.WriteString(", ") - } - col_defs.WriteString("[" + col.ColName + "] " + makeDecl(col.ti)) - } - - //options - var with_opts []string - - if b.Options.CheckConstraints { - with_opts = append(with_opts, "CHECK_CONSTRAINTS") - } - if b.Options.FireTriggers { - with_opts = append(with_opts, "FIRE_TRIGGERS") - } - if b.Options.KeepNulls { - with_opts = append(with_opts, "KEEP_NULLS") - } - if b.Options.KilobytesPerBatch > 0 { - with_opts = append(with_opts, fmt.Sprintf("KILOBYTES_PER_BATCH = %d", b.Options.KilobytesPerBatch)) - } - if b.Options.RowsPerBatch > 0 { - with_opts = append(with_opts, fmt.Sprintf("ROWS_PER_BATCH = %d", b.Options.RowsPerBatch)) - } - if len(b.Options.Order) > 0 { - with_opts = append(with_opts, fmt.Sprintf("ORDER(%s)", strings.Join(b.Options.Order, ","))) - } - if b.Options.Tablock { - with_opts = append(with_opts, "TABLOCK") - } - var with_part string - if len(with_opts) > 0 { - with_part = fmt.Sprintf("WITH (%s)", strings.Join(with_opts, ",")) - } - - query := fmt.Sprintf("INSERT BULK %s (%s) %s", b.tablename, col_defs.String(), with_part) - - stmt, err := b.cn.PrepareContext(ctx, query) - if err != nil { - return fmt.Errorf("Prepare failed: %s", err.Error()) - } - b.dlogf(query) - - _, err = stmt.(*Stmt).ExecContext(ctx, nil) - if err != nil { - return err - } - - b.headerSent = true - - var buf = b.cn.sess.buf - buf.BeginPacket(packBulkLoadBCP, false) - - // Send the columns metadata. - columnMetadata := b.createColMetadata() - _, err = buf.Write(columnMetadata) - - return -} - -// AddRow immediately writes the row to the destination table. -// The arguments are the row values in the order they were specified. 
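For context on the bulk-copy support being dropped from the vendored driver here: the deleted `bulkcopy_sql.go` later in this patch exposes this API through `database/sql` via `mssql.CopyIn`, where each `Exec` with arguments adds one row and a final zero-argument `Exec` flushes the batch and reports the row count. A minimal usage sketch follows, based only on the removed code; the DSN, table name, and column names are hypothetical.

```go
package main

import (
	"database/sql"
	"log"

	mssql "github.com/denisenkom/go-mssqldb"
)

func main() {
	// Hypothetical DSN; any valid connection string works the same way.
	db, err := sql.Open("mssql", "server=localhost;user id=sa;database=test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	txn, err := db.Begin()
	if err != nil {
		log.Fatal(err)
	}

	// CopyIn (from the deleted bulkcopy_sql.go) encodes the table, options
	// and column list into the special "INSERTBULK ..." statement.
	stmt, err := txn.Prepare(mssql.CopyIn("test_table", mssql.BulkOptions{Tablock: true}, "id", "name"))
	if err != nil {
		log.Fatal(err)
	}

	// Each Exec with arguments corresponds to one AddRow call.
	for i := 0; i < 10; i++ {
		if _, err := stmt.Exec(i, "row"); err != nil {
			log.Fatal(err)
		}
	}

	// A final Exec with no arguments flushes the batch (Done) and reports
	// the number of rows copied.
	res, err := stmt.Exec()
	if err != nil {
		log.Fatal(err)
	}
	n, _ := res.RowsAffected()
	log.Printf("copied %d rows", n)

	if err := stmt.Close(); err != nil {
		log.Fatal(err)
	}
	if err := txn.Commit(); err != nil {
		log.Fatal(err)
	}
}
```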
-func (b *Bulk) AddRow(row []interface{}) (err error) { - if !b.headerSent { - err = b.sendBulkCommand(b.ctx) - if err != nil { - return - } - } - - if len(row) != len(b.bulkColumns) { - return fmt.Errorf("Row does not have the same number of columns than the destination table %d %d", - len(row), len(b.bulkColumns)) - } - - bytes, err := b.makeRowData(row) - if err != nil { - return - } - - _, err = b.cn.sess.buf.Write(bytes) - if err != nil { - return - } - - b.numRows = b.numRows + 1 - return -} - -func (b *Bulk) makeRowData(row []interface{}) ([]byte, error) { - buf := new(bytes.Buffer) - buf.WriteByte(byte(tokenRow)) - - var logcol bytes.Buffer - for i, col := range b.bulkColumns { - - if b.Debug { - logcol.WriteString(fmt.Sprintf(" col[%d]='%v' ", i, row[i])) - } - param, err := b.makeParam(row[i], col) - if err != nil { - return nil, fmt.Errorf("bulkcopy: %s", err.Error()) - } - - if col.ti.Writer == nil { - return nil, fmt.Errorf("no writer for column: %s, TypeId: %#x", - col.ColName, col.ti.TypeId) - } - err = col.ti.Writer(buf, param.ti, param.buffer) - if err != nil { - return nil, fmt.Errorf("bulkcopy: %s", err.Error()) - } - } - - b.dlogf("row[%d] %s\n", b.numRows, logcol.String()) - - return buf.Bytes(), nil -} - -func (b *Bulk) Done() (rowcount int64, err error) { - if b.headerSent == false { - //no rows had been sent - return 0, nil - } - var buf = b.cn.sess.buf - buf.WriteByte(byte(tokenDone)) - - binary.Write(buf, binary.LittleEndian, uint16(doneFinal)) - binary.Write(buf, binary.LittleEndian, uint16(0)) // curcmd - - if b.cn.sess.loginAck.TDSVersion >= verTDS72 { - binary.Write(buf, binary.LittleEndian, uint64(0)) //rowcount 0 - } else { - binary.Write(buf, binary.LittleEndian, uint32(0)) //rowcount 0 - } - - buf.FinishPacket() - - tokchan := make(chan tokenStruct, 5) - go processResponse(b.ctx, b.cn.sess, tokchan, nil) - - var rowCount int64 - for token := range tokchan { - switch token := token.(type) { - case doneStruct: - if token.Status&doneCount != 0 { - rowCount = int64(token.RowCount) - } - if token.isError() { - return 0, token.getError() - } - case error: - return 0, b.cn.checkBadConn(token) - } - } - return rowCount, nil -} - -func (b *Bulk) createColMetadata() []byte { - buf := new(bytes.Buffer) - buf.WriteByte(byte(tokenColMetadata)) // token - binary.Write(buf, binary.LittleEndian, uint16(len(b.bulkColumns))) // column count - - for i, col := range b.bulkColumns { - - if b.cn.sess.loginAck.TDSVersion >= verTDS72 { - binary.Write(buf, binary.LittleEndian, uint32(col.UserType)) // usertype, always 0? - } else { - binary.Write(buf, binary.LittleEndian, uint16(col.UserType)) - } - binary.Write(buf, binary.LittleEndian, uint16(col.Flags)) - - writeTypeInfo(buf, &b.bulkColumns[i].ti) - - if col.ti.TypeId == typeNText || - col.ti.TypeId == typeText || - col.ti.TypeId == typeImage { - - tablename_ucs2 := str2ucs2(b.tablename) - binary.Write(buf, binary.LittleEndian, uint16(len(tablename_ucs2)/2)) - buf.Write(tablename_ucs2) - } - colname_ucs2 := str2ucs2(col.ColName) - buf.WriteByte(uint8(len(colname_ucs2) / 2)) - buf.Write(colname_ucs2) - } - - return buf.Bytes() -} - -func (b *Bulk) getMetadata(ctx context.Context) (err error) { - stmt, err := b.cn.prepareContext(ctx, "SET FMTONLY ON") - if err != nil { - return - } - - _, err = stmt.ExecContext(ctx, nil) - if err != nil { - return - } - - // Get columns info. 
- stmt, err = b.cn.prepareContext(ctx, fmt.Sprintf("select * from %s SET FMTONLY OFF", b.tablename)) - if err != nil { - return - } - rows, err := stmt.QueryContext(ctx, nil) - if err != nil { - return fmt.Errorf("get columns info failed: %v", err) - } - b.metadata = rows.(*Rows).cols - - if b.Debug { - for _, col := range b.metadata { - b.dlogf("col: %s typeId: %#x size: %d scale: %d prec: %d flags: %d lcid: %#x\n", - col.ColName, col.ti.TypeId, col.ti.Size, col.ti.Scale, col.ti.Prec, - col.Flags, col.ti.Collation.LcidAndFlags) - } - } - - return rows.Close() -} - -func (b *Bulk) makeParam(val DataValue, col columnStruct) (res param, err error) { - res.ti.Size = col.ti.Size - res.ti.TypeId = col.ti.TypeId - - if val == nil { - res.ti.Size = 0 - return - } - - switch col.ti.TypeId { - - case typeInt1, typeInt2, typeInt4, typeInt8, typeIntN: - var intvalue int64 - - switch val := val.(type) { - case int: - intvalue = int64(val) - case int32: - intvalue = int64(val) - case int64: - intvalue = val - default: - err = fmt.Errorf("mssql: invalid type for int column") - return - } - - res.buffer = make([]byte, res.ti.Size) - if col.ti.Size == 1 { - res.buffer[0] = byte(intvalue) - } else if col.ti.Size == 2 { - binary.LittleEndian.PutUint16(res.buffer, uint16(intvalue)) - } else if col.ti.Size == 4 { - binary.LittleEndian.PutUint32(res.buffer, uint32(intvalue)) - } else if col.ti.Size == 8 { - binary.LittleEndian.PutUint64(res.buffer, uint64(intvalue)) - } - case typeFlt4, typeFlt8, typeFltN: - var floatvalue float64 - - switch val := val.(type) { - case float32: - floatvalue = float64(val) - case float64: - floatvalue = val - case int: - floatvalue = float64(val) - case int64: - floatvalue = float64(val) - default: - err = fmt.Errorf("mssql: invalid type for float column: %s", val) - return - } - - if col.ti.Size == 4 { - res.buffer = make([]byte, 4) - binary.LittleEndian.PutUint32(res.buffer, math.Float32bits(float32(floatvalue))) - } else if col.ti.Size == 8 { - res.buffer = make([]byte, 8) - binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(floatvalue)) - } - case typeNVarChar, typeNText, typeNChar: - - switch val := val.(type) { - case string: - res.buffer = str2ucs2(val) - case []byte: - res.buffer = val - default: - err = fmt.Errorf("mssql: invalid type for nvarchar column: %s", val) - return - } - res.ti.Size = len(res.buffer) - - case typeVarChar, typeBigVarChar, typeText, typeChar, typeBigChar: - switch val := val.(type) { - case string: - res.buffer = []byte(val) - case []byte: - res.buffer = val - default: - err = fmt.Errorf("mssql: invalid type for varchar column: %s", val) - return - } - res.ti.Size = len(res.buffer) - - case typeBit, typeBitN: - if reflect.TypeOf(val).Kind() != reflect.Bool { - err = fmt.Errorf("mssql: invalid type for bit column: %s", val) - return - } - res.ti.TypeId = typeBitN - res.ti.Size = 1 - res.buffer = make([]byte, 1) - if val.(bool) { - res.buffer[0] = 1 - } - case typeDateTime2N: - switch val := val.(type) { - case time.Time: - res.buffer = encodeDateTime2(val, int(col.ti.Scale)) - res.ti.Size = len(res.buffer) - default: - err = fmt.Errorf("mssql: invalid type for datetime2 column: %s", val) - return - } - case typeDateTimeOffsetN: - switch val := val.(type) { - case time.Time: - res.buffer = encodeDateTimeOffset(val, int(res.ti.Scale)) - res.ti.Size = len(res.buffer) - - default: - err = fmt.Errorf("mssql: invalid type for datetimeoffset column: %s", val) - return - } - case typeDateN: - switch val := val.(type) { - case time.Time: - 
res.buffer = encodeDate(val) - res.ti.Size = len(res.buffer) - default: - err = fmt.Errorf("mssql: invalid type for date column: %s", val) - return - } - case typeDateTime, typeDateTimeN, typeDateTim4: - switch val := val.(type) { - case time.Time: - if col.ti.Size == 4 { - res.buffer = encodeDateTim4(val) - res.ti.Size = len(res.buffer) - } else if col.ti.Size == 8 { - res.buffer = encodeDateTime(val) - res.ti.Size = len(res.buffer) - } else { - err = fmt.Errorf("mssql: invalid size of column") - } - - default: - err = fmt.Errorf("mssql: invalid type for datetime column: %s", val) - } - - // case typeMoney, typeMoney4, typeMoneyN: - case typeDecimal, typeDecimalN, typeNumeric, typeNumericN: - var value float64 - switch v := val.(type) { - case int: - value = float64(v) - case int8: - value = float64(v) - case int16: - value = float64(v) - case int32: - value = float64(v) - case int64: - value = float64(v) - case float32: - value = float64(v) - case float64: - value = v - case string: - if value, err = strconv.ParseFloat(v, 64); err != nil { - return res, fmt.Errorf("bulk: unable to convert string to float: %v", err) - } - default: - return res, fmt.Errorf("unknown value for decimal: %#v", v) - } - - perc := col.ti.Prec - scale := col.ti.Scale - var dec Decimal - dec, err = Float64ToDecimalScale(value, scale) - if err != nil { - return res, err - } - dec.prec = perc - - var length byte - switch { - case perc <= 9: - length = 4 - case perc <= 19: - length = 8 - case perc <= 28: - length = 12 - default: - length = 16 - } - - buf := make([]byte, length+1) - // first byte length written by typeInfo.writer - res.ti.Size = int(length) + 1 - // second byte sign - if value < 0 { - buf[0] = 0 - } else { - buf[0] = 1 - } - - ub := dec.UnscaledBytes() - l := len(ub) - if l > int(length) { - err = fmt.Errorf("decimal out of range: %s", dec) - return res, err - } - // reverse the bytes - for i, j := 1, l-1; j >= 0; i, j = i+1, j-1 { - buf[i] = ub[j] - } - res.buffer = buf - case typeBigVarBin, typeBigBinary: - switch val := val.(type) { - case []byte: - res.ti.Size = len(val) - res.buffer = val - default: - err = fmt.Errorf("mssql: invalid type for Binary column: %s", val) - return - } - case typeGuid: - switch val := val.(type) { - case []byte: - res.ti.Size = len(val) - res.buffer = val - default: - err = fmt.Errorf("mssql: invalid type for Guid column: %s", val) - return - } - - default: - err = fmt.Errorf("mssql: type %x not implemented", col.ti.TypeId) - } - return - -} - -func (b *Bulk) dlogf(format string, v ...interface{}) { - if b.Debug { - b.cn.sess.log.Printf(format, v...) 
- } -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go b/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go deleted file mode 100644 index 709505b2a06a7..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/bulkcopy_sql.go +++ /dev/null @@ -1,93 +0,0 @@ -package mssql - -import ( - "context" - "database/sql/driver" - "encoding/json" - "errors" -) - -type copyin struct { - cn *Conn - bulkcopy *Bulk - closed bool -} - -type serializableBulkConfig struct { - TableName string - ColumnsName []string - Options BulkOptions -} - -func (d *Driver) OpenConnection(dsn string) (*Conn, error) { - return d.open(context.Background(), dsn) -} - -func (c *Conn) prepareCopyIn(ctx context.Context, query string) (_ driver.Stmt, err error) { - config_json := query[11:] - - bulkconfig := serializableBulkConfig{} - err = json.Unmarshal([]byte(config_json), &bulkconfig) - if err != nil { - return - } - - bulkcopy := c.CreateBulkContext(ctx, bulkconfig.TableName, bulkconfig.ColumnsName) - bulkcopy.Options = bulkconfig.Options - - ci := ©in{ - cn: c, - bulkcopy: bulkcopy, - } - - return ci, nil -} - -func CopyIn(table string, options BulkOptions, columns ...string) string { - bulkconfig := &serializableBulkConfig{TableName: table, Options: options, ColumnsName: columns} - - config_json, err := json.Marshal(bulkconfig) - if err != nil { - panic(err) - } - - stmt := "INSERTBULK " + string(config_json) - - return stmt -} - -func (ci *copyin) NumInput() int { - return -1 -} - -func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { - panic("should never be called") -} - -func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { - if ci.closed { - return nil, errors.New("copyin query is closed") - } - - if len(v) == 0 { - rowCount, err := ci.bulkcopy.Done() - ci.closed = true - return driver.RowsAffected(rowCount), err - } - - t := make([]interface{}, len(v)) - for i, val := range v { - t[i] = val - } - - err = ci.bulkcopy.AddRow(t) - if err != nil { - return - } - - return driver.RowsAffected(0), nil -} - -func (ci *copyin) Close() (err error) { - return nil -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/charset.go b/vendor/github.com/denisenkom/go-mssqldb/charset.go similarity index 94% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/charset.go rename to vendor/github.com/denisenkom/go-mssqldb/charset.go index 8dc2279ea4833..f1cc247a9d6f1 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/charset.go +++ b/vendor/github.com/denisenkom/go-mssqldb/charset.go @@ -1,14 +1,14 @@ -package cp +package mssql type charsetMap struct { sb [256]rune // single byte runes, -1 for a double byte character lead byte db map[int]rune // double byte runes } -func collation2charset(col Collation) *charsetMap { +func collation2charset(col collation) *charsetMap { // http://msdn.microsoft.com/en-us/library/ms144250.aspx // http://msdn.microsoft.com/en-us/library/ms144250(v=sql.105).aspx - switch col.SortId { + switch col.sortId { case 30, 31, 32, 33, 34: return cp437 case 40, 41, 42, 44, 49, 55, 56, 57, 58, 59, 60, 61: @@ -86,7 +86,7 @@ func collation2charset(col Collation) *charsetMap { return cp1252 } -func CharsetToUTF8(col Collation, s []byte) string { +func charset2utf8(col collation, s []byte) string { cm := collation2charset(col) if cm == nil { return string(s) diff --git a/vendor/github.com/denisenkom/go-mssqldb/collation.go 
b/vendor/github.com/denisenkom/go-mssqldb/collation.go new file mode 100644 index 0000000000000..ac9cf20b7b051 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/collation.go @@ -0,0 +1,39 @@ +package mssql + +import ( + "encoding/binary" + "io" +) + +// http://msdn.microsoft.com/en-us/library/dd340437.aspx + +type collation struct { + lcidAndFlags uint32 + sortId uint8 +} + +func (c collation) getLcid() uint32 { + return c.lcidAndFlags & 0x000fffff +} + +func (c collation) getFlags() uint32 { + return (c.lcidAndFlags & 0x0ff00000) >> 20 +} + +func (c collation) getVersion() uint32 { + return (c.lcidAndFlags & 0xf0000000) >> 28 +} + +func readCollation(r *tdsBuffer) (res collation) { + res.lcidAndFlags = r.uint32() + res.sortId = r.byte() + return +} + +func writeCollation(w io.Writer, col collation) (err error) { + if err = binary.Write(w, binary.LittleEndian, col.lcidAndFlags); err != nil { + return + } + err = binary.Write(w, binary.LittleEndian, col.sortId) + return +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/convert.go b/vendor/github.com/denisenkom/go-mssqldb/convert.go deleted file mode 100644 index 51bd4ee3ac73b..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/convert.go +++ /dev/null @@ -1,306 +0,0 @@ -package mssql - -import "errors" - -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Type conversions for Scan. - -// This file was imported from database.sql.convert for go 1.10.3 with minor modifications to get -// convertAssign function -// This function is used internally by sql to convert values during call to Scan, we need same -// logic to return values for OUTPUT parameters. -// TODO: sql library should instead expose function defaultCheckNamedValue to be callable by drivers - -import ( - "database/sql" - "database/sql/driver" - "fmt" - "reflect" - "strconv" - "time" -) - -var errNilPtr = errors.New("destination pointer is nil") // embedded in descriptive error - -// convertAssign copies to dest the value in src, converting it if possible. -// An error is returned if the copy would result in loss of information. -// dest should be a pointer type. -func convertAssign(dest, src interface{}) error { - // Common cases, without reflect. - switch s := src.(type) { - case string: - switch d := dest.(type) { - case *string: - if d == nil { - return errNilPtr - } - *d = s - return nil - case *[]byte: - if d == nil { - return errNilPtr - } - *d = []byte(s) - return nil - case *sql.RawBytes: - if d == nil { - return errNilPtr - } - *d = append((*d)[:0], s...) 
- return nil - } - case []byte: - switch d := dest.(type) { - case *string: - if d == nil { - return errNilPtr - } - *d = string(s) - return nil - case *interface{}: - if d == nil { - return errNilPtr - } - *d = cloneBytes(s) - return nil - case *[]byte: - if d == nil { - return errNilPtr - } - *d = cloneBytes(s) - return nil - case *sql.RawBytes: - if d == nil { - return errNilPtr - } - *d = s - return nil - } - case time.Time: - switch d := dest.(type) { - case *time.Time: - *d = s - return nil - case *string: - *d = s.Format(time.RFC3339Nano) - return nil - case *[]byte: - if d == nil { - return errNilPtr - } - *d = []byte(s.Format(time.RFC3339Nano)) - return nil - case *sql.RawBytes: - if d == nil { - return errNilPtr - } - *d = s.AppendFormat((*d)[:0], time.RFC3339Nano) - return nil - } - case nil: - switch d := dest.(type) { - case *interface{}: - if d == nil { - return errNilPtr - } - *d = nil - return nil - case *[]byte: - if d == nil { - return errNilPtr - } - *d = nil - return nil - case *sql.RawBytes: - if d == nil { - return errNilPtr - } - *d = nil - return nil - } - } - - var sv reflect.Value - - switch d := dest.(type) { - case *string: - sv = reflect.ValueOf(src) - switch sv.Kind() { - case reflect.Bool, - reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, - reflect.Float32, reflect.Float64: - *d = asString(src) - return nil - } - case *[]byte: - sv = reflect.ValueOf(src) - if b, ok := asBytes(nil, sv); ok { - *d = b - return nil - } - case *sql.RawBytes: - sv = reflect.ValueOf(src) - if b, ok := asBytes([]byte(*d)[:0], sv); ok { - *d = sql.RawBytes(b) - return nil - } - case *bool: - bv, err := driver.Bool.ConvertValue(src) - if err == nil { - *d = bv.(bool) - } - return err - case *interface{}: - *d = src - return nil - } - - if scanner, ok := dest.(sql.Scanner); ok { - return scanner.Scan(src) - } - - dpv := reflect.ValueOf(dest) - if dpv.Kind() != reflect.Ptr { - return errors.New("destination not a pointer") - } - if dpv.IsNil() { - return errNilPtr - } - - if !sv.IsValid() { - sv = reflect.ValueOf(src) - } - - dv := reflect.Indirect(dpv) - if sv.IsValid() && sv.Type().AssignableTo(dv.Type()) { - switch b := src.(type) { - case []byte: - dv.Set(reflect.ValueOf(cloneBytes(b))) - default: - dv.Set(sv) - } - return nil - } - - if dv.Kind() == sv.Kind() && sv.Type().ConvertibleTo(dv.Type()) { - dv.Set(sv.Convert(dv.Type())) - return nil - } - - // The following conversions use a string value as an intermediate representation - // to convert between various numeric types. - // - // This also allows scanning into user defined types such as "type Int int64". - // For symmetry, also check for string destination types. 
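The comment above describes the string-intermediate strategy the deleted `convertAssign` uses for numeric kinds, which is what allows OUTPUT parameters to be scanned into user-defined types such as `type Int int64`. A minimal, self-contained sketch of that idea is below; the helper and type names are hypothetical and only mirror the ParseInt branch that follows.

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// MyID is a user-defined integer type, as mentioned in the comment above.
type MyID int64

// assignViaString formats src as a string and re-parses it at the
// destination's bit width. dest must be a non-nil pointer.
func assignViaString(dest, src interface{}) error {
	dv := reflect.Indirect(reflect.ValueOf(dest))
	switch dv.Kind() {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		s := fmt.Sprintf("%v", src)
		i64, err := strconv.ParseInt(s, 10, dv.Type().Bits())
		if err != nil {
			return fmt.Errorf("converting %T (%q) to %s: %v", src, s, dv.Kind(), err)
		}
		dv.SetInt(i64)
		return nil
	}
	return fmt.Errorf("unsupported destination kind %s", dv.Kind())
}

func main() {
	var id MyID
	if err := assignViaString(&id, int32(42)); err != nil {
		panic(err)
	}
	fmt.Println(id) // 42
}
```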
- switch dv.Kind() { - case reflect.Ptr: - if src == nil { - dv.Set(reflect.Zero(dv.Type())) - return nil - } else { - dv.Set(reflect.New(dv.Type().Elem())) - return convertAssign(dv.Interface(), src) - } - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - s := asString(src) - i64, err := strconv.ParseInt(s, 10, dv.Type().Bits()) - if err != nil { - err = strconvErr(err) - return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) - } - dv.SetInt(i64) - return nil - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - s := asString(src) - u64, err := strconv.ParseUint(s, 10, dv.Type().Bits()) - if err != nil { - err = strconvErr(err) - return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) - } - dv.SetUint(u64) - return nil - case reflect.Float32, reflect.Float64: - s := asString(src) - f64, err := strconv.ParseFloat(s, dv.Type().Bits()) - if err != nil { - err = strconvErr(err) - return fmt.Errorf("converting driver.Value type %T (%q) to a %s: %v", src, s, dv.Kind(), err) - } - dv.SetFloat(f64) - return nil - case reflect.String: - switch v := src.(type) { - case string: - dv.SetString(v) - return nil - case []byte: - dv.SetString(string(v)) - return nil - } - } - - return fmt.Errorf("unsupported Scan, storing driver.Value type %T into type %T", src, dest) -} - -func strconvErr(err error) error { - if ne, ok := err.(*strconv.NumError); ok { - return ne.Err - } - return err -} - -func cloneBytes(b []byte) []byte { - if b == nil { - return nil - } else { - c := make([]byte, len(b)) - copy(c, b) - return c - } -} - -func asString(src interface{}) string { - switch v := src.(type) { - case string: - return v - case []byte: - return string(v) - } - rv := reflect.ValueOf(src) - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.FormatInt(rv.Int(), 10) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.FormatUint(rv.Uint(), 10) - case reflect.Float64: - return strconv.FormatFloat(rv.Float(), 'g', -1, 64) - case reflect.Float32: - return strconv.FormatFloat(rv.Float(), 'g', -1, 32) - case reflect.Bool: - return strconv.FormatBool(rv.Bool()) - } - return fmt.Sprintf("%v", src) -} - -func asBytes(buf []byte, rv reflect.Value) (b []byte, ok bool) { - switch rv.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return strconv.AppendInt(buf, rv.Int(), 10), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - return strconv.AppendUint(buf, rv.Uint(), 10), true - case reflect.Float32: - return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 32), true - case reflect.Float64: - return strconv.AppendFloat(buf, rv.Float(), 'g', -1, 64), true - case reflect.Bool: - return strconv.AppendBool(buf, rv.Bool()), true - case reflect.String: - s := rv.String() - return append(buf, s...), true - } - return -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1250.go b/vendor/github.com/denisenkom/go-mssqldb/cp1250.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1250.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1250.go index 5c8094ec3cc83..8207366be764b 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1250.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1250.go @@ -1,4 +1,4 @@ 
-package cp +package mssql var cp1250 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1251.go b/vendor/github.com/denisenkom/go-mssqldb/cp1251.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1251.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1251.go index dc5896770ca15..f5b81c3934cf4 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1251.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1251.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1251 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1252.go b/vendor/github.com/denisenkom/go-mssqldb/cp1252.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1252.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1252.go index 5ae8703542f2e..ed705d35a7a27 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1252.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1252.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1252 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1253.go b/vendor/github.com/denisenkom/go-mssqldb/cp1253.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1253.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1253.go index 52c8e07aa69ec..cb1e1a7623695 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1253.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1253.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1253 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1254.go b/vendor/github.com/denisenkom/go-mssqldb/cp1254.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1254.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1254.go index 5d8864a521fe7..a4b09bb44f54c 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1254.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1254.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1254 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1255.go b/vendor/github.com/denisenkom/go-mssqldb/cp1255.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1255.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1255.go index 60619895d92cc..97f9ee9e91330 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1255.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1255.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1255 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1256.go b/vendor/github.com/denisenkom/go-mssqldb/cp1256.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1256.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1256.go index ffd04b3e5bb95..e91241b4489ec 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1256.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1256.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1256 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git 
a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1257.go b/vendor/github.com/denisenkom/go-mssqldb/cp1257.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1257.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1257.go index 492da72ea4d03..bd93e6f891a84 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1257.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1257.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1257 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1258.go b/vendor/github.com/denisenkom/go-mssqldb/cp1258.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1258.go rename to vendor/github.com/denisenkom/go-mssqldb/cp1258.go index 80be52c596645..4e1f8ac9438f5 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp1258.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp1258.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp1258 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp437.go b/vendor/github.com/denisenkom/go-mssqldb/cp437.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp437.go rename to vendor/github.com/denisenkom/go-mssqldb/cp437.go index 76dedfb8ef53e..f47f8ecc77b41 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp437.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp437.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp437 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp850.go b/vendor/github.com/denisenkom/go-mssqldb/cp850.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp850.go rename to vendor/github.com/denisenkom/go-mssqldb/cp850.go index 927ab249efa71..e6b3d16904462 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp850.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp850.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp850 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp874.go b/vendor/github.com/denisenkom/go-mssqldb/cp874.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp874.go rename to vendor/github.com/denisenkom/go-mssqldb/cp874.go index 723bf6c3926ab..9d691a1a59572 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp874.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp874.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp874 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp932.go b/vendor/github.com/denisenkom/go-mssqldb/cp932.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp932.go rename to vendor/github.com/denisenkom/go-mssqldb/cp932.go index 5fc1377424a85..980c55d815f60 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp932.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp932.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp932 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp936.go b/vendor/github.com/denisenkom/go-mssqldb/cp936.go similarity 
index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp936.go rename to vendor/github.com/denisenkom/go-mssqldb/cp936.go index d1fac12e26bbd..fca5da76d4d09 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp936.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp936.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp936 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp949.go b/vendor/github.com/denisenkom/go-mssqldb/cp949.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp949.go rename to vendor/github.com/denisenkom/go-mssqldb/cp949.go index 52c708dfa5cf3..cddfcbc852261 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp949.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp949.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp949 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp950.go b/vendor/github.com/denisenkom/go-mssqldb/cp950.go similarity index 99% rename from vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp950.go rename to vendor/github.com/denisenkom/go-mssqldb/cp950.go index 1301cd0f05274..cbf25cb91a8c8 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/cp950.go +++ b/vendor/github.com/denisenkom/go-mssqldb/cp950.go @@ -1,4 +1,4 @@ -package cp +package mssql var cp950 *charsetMap = &charsetMap{ sb: [256]rune{ diff --git a/vendor/github.com/denisenkom/go-mssqldb/decimal.go b/vendor/github.com/denisenkom/go-mssqldb/decimal.go index 372f64b4eb148..76f3a6b5b49e4 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/decimal.go +++ b/vendor/github.com/denisenkom/go-mssqldb/decimal.go @@ -32,13 +32,7 @@ func (d Decimal) ToFloat64() float64 { return val } -const autoScale = 100 - func Float64ToDecimal(f float64) (Decimal, error) { - return Float64ToDecimalScale(f, autoScale) -} - -func Float64ToDecimalScale(f float64, scale uint8) (Decimal, error) { var dec Decimal if math.IsNaN(f) { return dec, errors.New("NaN") @@ -55,10 +49,10 @@ func Float64ToDecimalScale(f float64, scale uint8) (Decimal, error) { } dec.prec = 20 var integer float64 - for dec.scale = 0; dec.scale <= scale; dec.scale++ { + for dec.scale = 0; dec.scale <= 20; dec.scale++ { integer = f * scaletblflt64[dec.scale] _, frac := math.Modf(integer) - if frac == 0 && scale == autoScale { + if frac == 0 { break } } @@ -79,7 +73,7 @@ func init() { } } -func (d Decimal) BigInt() big.Int { +func (d Decimal) Bytes() []byte { bytes := make([]byte, 16) binary.BigEndian.PutUint32(bytes[0:4], d.integer[3]) binary.BigEndian.PutUint32(bytes[4:8], d.integer[2]) @@ -90,19 +84,9 @@ func (d Decimal) BigInt() big.Int { if !d.positive { x.Neg(&x) } - return x -} - -func (d Decimal) Bytes() []byte { - x := d.BigInt() return scaleBytes(x.String(), d.scale) } -func (d Decimal) UnscaledBytes() []byte { - x := d.BigInt() - return x.Bytes() -} - func scaleBytes(s string, scale uint8) []byte { z := make([]byte, 0, len(s)+1) if s[0] == '-' || s[0] == '+' { diff --git a/vendor/github.com/denisenkom/go-mssqldb/doc.go b/vendor/github.com/denisenkom/go-mssqldb/doc.go deleted file mode 100644 index 2e54929c572f9..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -// package mssql implements the TDS protocol used to connect to MS SQL Server (sqlserver) -// database 
servers. -// -// This package registers the driver: -// sqlserver: uses native "@" parameter placeholder names and does no pre-processing. -// -// If the ordinal position is used for query parameters, identifiers will be named -// "@p1", "@p2", ... "@pN". -// -// Please refer to the README for the format of the DSN. There are multiple DSN -// formats accepted: ADO style, ODBC style, and URL style. The following is an -// example of a URL style DSN: -// sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30 -package mssql diff --git a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go b/vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go deleted file mode 100644 index ae7b03bf137eb..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/internal/cp/collation.go +++ /dev/null @@ -1,20 +0,0 @@ -package cp - -// http://msdn.microsoft.com/en-us/library/dd340437.aspx - -type Collation struct { - LcidAndFlags uint32 - SortId uint8 -} - -func (c Collation) getLcid() uint32 { - return c.LcidAndFlags & 0x000fffff -} - -func (c Collation) getFlags() uint32 { - return (c.LcidAndFlags & 0x0ff00000) >> 20 -} - -func (c Collation) getVersion() uint32 { - return (c.LcidAndFlags & 0xf0000000) >> 28 -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/log.go b/vendor/github.com/denisenkom/go-mssqldb/log.go index 9b8c551e88d92..f350aed09988e 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/log.go +++ b/vendor/github.com/denisenkom/go-mssqldb/log.go @@ -4,26 +4,19 @@ import ( "log" ) -type Logger interface { - Printf(format string, v ...interface{}) - Println(v ...interface{}) -} - -type optionalLogger struct { - logger Logger -} +type Logger log.Logger -func (o optionalLogger) Printf(format string, v ...interface{}) { - if o.logger != nil { - o.logger.Printf(format, v...) +func (logger *Logger) Printf(format string, v ...interface{}) { + if logger != nil { + (*log.Logger)(logger).Printf(format, v...) } else { log.Printf(format, v...) } } -func (o optionalLogger) Println(v ...interface{}) { - if o.logger != nil { - o.logger.Println(v...) +func (logger *Logger) Println(v ...interface{}) { + if logger != nil { + (*log.Logger)(logger).Println(v...) } else { log.Println(v...) } diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql.go b/vendor/github.com/denisenkom/go-mssqldb/mssql.go index 9065da53dea51..9663651e7c60c 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql.go +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql.go @@ -1,331 +1,122 @@ package mssql import ( - "context" "database/sql" "database/sql/driver" "encoding/binary" "errors" "fmt" "io" + "log" "math" "net" - "reflect" "strings" "time" - "unicode" ) -// ReturnStatus may be used to return the return value from a proc. 
-// -// var rs mssql.ReturnStatus -// _, err := db.Exec("theproc", &rs) -// log.Printf("return status = %d", rs) -type ReturnStatus int32 - -var driverInstance = &Driver{processQueryText: true} -var driverInstanceNoProcess = &Driver{processQueryText: false} - func init() { - sql.Register("mssql", driverInstance) - sql.Register("sqlserver", driverInstanceNoProcess) - createDialer = func(p *connectParams) Dialer { - return netDialer{&net.Dialer{KeepAlive: p.keepAlive}} - } -} - -var createDialer func(p *connectParams) Dialer - -type netDialer struct { - nd *net.Dialer -} - -func (d netDialer) DialContext(ctx context.Context, network string, addr string) (net.Conn, error) { - return d.nd.DialContext(ctx, network, addr) -} - -type Driver struct { - log optionalLogger - - processQueryText bool -} - -// OpenConnector opens a new connector. Useful to dial with a context. -func (d *Driver) OpenConnector(dsn string) (*Connector, error) { - params, err := parseConnectParams(dsn) - if err != nil { - return nil, err - } - return &Connector{ - params: params, - driver: d, - }, nil -} - -func (d *Driver) Open(dsn string) (driver.Conn, error) { - return d.open(context.Background(), dsn) -} - -func SetLogger(logger Logger) { - driverInstance.SetLogger(logger) - driverInstanceNoProcess.SetLogger(logger) + sql.Register("mssql", &MssqlDriver{}) } -func (d *Driver) SetLogger(logger Logger) { - d.log = optionalLogger{logger} +type MssqlDriver struct { + log *log.Logger } -// NewConnector creates a new connector from a DSN. -// The returned connector may be used with sql.OpenDB. -func NewConnector(dsn string) (*Connector, error) { - params, err := parseConnectParams(dsn) - if err != nil { - return nil, err - } - c := &Connector{ - params: params, - driver: driverInstanceNoProcess, - } - return c, nil +func (d *MssqlDriver) SetLogger(logger *log.Logger) { + d.log = logger } -// Connector holds the parsed DSN and is ready to make a new connection -// at any time. -// -// In the future, settings that cannot be passed through a string DSN -// may be set directly on the connector. -type Connector struct { - params connectParams - driver *Driver - - // SessionInitSQL is executed after marking a given session to be reset. - // When not present, the next query will still reset the session to the - // database defaults. - // - // When present the connection will immediately mark the session to - // be reset, then execute the SessionInitSQL text to setup the session - // that may be different from the base database defaults. - // - // For Example, the application relies on the following defaults - // but is not allowed to set them at the database system level. - // - // SET XACT_ABORT ON; - // SET TEXTSIZE -1; - // SET ANSI_NULLS ON; - // SET LOCK_TIMEOUT 10000; - // - // SessionInitSQL should not attempt to manually call sp_reset_connection. - // This will happen at the TDS layer. - // - // SessionInitSQL is optional. The session will be reset even if - // SessionInitSQL is empty. - SessionInitSQL string - - // Dialer sets a custom dialer for all network operations. - // If Dialer is not set, normal net dialers are used. 
- Dialer Dialer -} - -type Dialer interface { - DialContext(ctx context.Context, network string, addr string) (net.Conn, error) -} - -func (c *Connector) getDialer(p *connectParams) Dialer { - if c != nil && c.Dialer != nil { - return c.Dialer - } - return createDialer(p) -} - -type Conn struct { - connector *Connector - sess *tdsSession - transactionCtx context.Context - resetSession bool - - processQueryText bool - connectionGood bool - - outs map[string]interface{} - returnStatus *ReturnStatus -} - -func (c *Conn) setReturnStatus(s ReturnStatus) { - if c.returnStatus == nil { - return - } - *c.returnStatus = s -} - -func (c *Conn) checkBadConn(err error) error { - // this is a hack to address Issue #275 - // we set connectionGood flag to false if - // error indicates that connection is not usable - // but we return actual error instead of ErrBadConn - // this will cause connection to stay in a pool - // but next request to this connection will return ErrBadConn - - // it might be possible to revise this hack after - // https://github.com/golang/go/issues/20807 - // is implemented - switch err { - case nil: - return nil - case io.EOF: - c.connectionGood = false +func CheckBadConn(err error) error { + if err == io.EOF { return driver.ErrBadConn - case driver.ErrBadConn: - // It is an internal programming error if driver.ErrBadConn - // is ever passed to this function. driver.ErrBadConn should - // only ever be returned in response to a *mssql.Conn.connectionGood == false - // check in the external facing API. - panic("driver.ErrBadConn in checkBadConn. This should not happen.") } - switch err.(type) { + switch e := err.(type) { case net.Error: - c.connectionGood = false - return err - case StreamError: - c.connectionGood = false - return err + if e.Timeout() { + return e + } + return driver.ErrBadConn default: return err } } -func (c *Conn) clearOuts() { - c.outs = nil +type MssqlConn struct { + sess *tdsSession } -func (c *Conn) simpleProcessResp(ctx context.Context) error { +func (c *MssqlConn) Commit() error { + headers := []headerStruct{ + {hdrtype: dataStmHdrTransDescr, + data: transDescrHdr{c.sess.tranid, 1}.pack()}, + } + if err := sendCommitXact(c.sess.buf, headers, "", 0, 0, ""); err != nil { + return err + } + tokchan := make(chan tokenStruct, 5) - go processResponse(ctx, c.sess, tokchan, c.outs) - c.clearOuts() + go processResponse(c.sess, tokchan) for tok := range tokchan { switch token := tok.(type) { - case doneStruct: - if token.isError() { - return c.checkBadConn(token.getError()) - } case error: - return c.checkBadConn(token) + return token } } return nil } -func (c *Conn) Commit() error { - if !c.connectionGood { - return driver.ErrBadConn - } - if err := c.sendCommitRequest(); err != nil { - return c.checkBadConn(err) - } - return c.simpleProcessResp(c.transactionCtx) -} - -func (c *Conn) sendCommitRequest() error { +func (c *MssqlConn) Rollback() error { headers := []headerStruct{ {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{c.sess.tranid, 1}.pack()}, } - reset := c.resetSession - c.resetSession = false - if err := sendCommitXact(c.sess.buf, headers, "", 0, 0, "", reset); err != nil { - if c.sess.logFlags&logErrors != 0 { - c.sess.log.Printf("Failed to send CommitXact with %v", err) - } - c.connectionGood = false - return fmt.Errorf("Faild to send CommitXact: %v", err) - } - return nil -} - -func (c *Conn) Rollback() error { - if !c.connectionGood { - return driver.ErrBadConn - } - if err := c.sendRollbackRequest(); err != nil { - return c.checkBadConn(err) 
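[editor's illustration, not part of the patch] The Connector, SessionInitSQL and Dialer definitions in the hunks above (the API on the "-" side, which this change removes from the vendored copy) pair with database/sql's OpenDB. A minimal sketch assuming that API is available; the dialer type, DSN and SQL are illustrative:

package main

import (
	"context"
	"database/sql"
	"log"
	"net"

	mssql "github.com/denisenkom/go-mssqldb"
)

// loggingDialer is a hypothetical Dialer implementation; it only delegates to
// net.Dialer, but the same hook could add proxying or tracing.
type loggingDialer struct{ d net.Dialer }

func (l loggingDialer) DialContext(ctx context.Context, network, addr string) (net.Conn, error) {
	log.Printf("dialing %s %s", network, addr)
	return l.d.DialContext(ctx, network, addr)
}

func main() {
	c, err := mssql.NewConnector("sqlserver://sa:mypass@localhost?database=master") // placeholder DSN
	if err != nil {
		log.Fatal(err)
	}
	c.SessionInitSQL = "SET ANSI_NULLS ON; SET LOCK_TIMEOUT 10000;" // optional per-session setup
	c.Dialer = loggingDialer{}                                      // custom network dialer
	db := sql.OpenDB(c)
	defer db.Close()
}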
+ if err := sendRollbackXact(c.sess.buf, headers, "", 0, 0, ""); err != nil { + return err } - return c.simpleProcessResp(c.transactionCtx) -} -func (c *Conn) sendRollbackRequest() error { - headers := []headerStruct{ - {hdrtype: dataStmHdrTransDescr, - data: transDescrHdr{c.sess.tranid, 1}.pack()}, - } - reset := c.resetSession - c.resetSession = false - if err := sendRollbackXact(c.sess.buf, headers, "", 0, 0, "", reset); err != nil { - if c.sess.logFlags&logErrors != 0 { - c.sess.log.Printf("Failed to send RollbackXact with %v", err) + tokchan := make(chan tokenStruct, 5) + go processResponse(c.sess, tokchan) + for tok := range tokchan { + switch token := tok.(type) { + case error: + return token } - c.connectionGood = false - return fmt.Errorf("Failed to send RollbackXact: %v", err) } return nil } -func (c *Conn) Begin() (driver.Tx, error) { - return c.begin(context.Background(), isolationUseCurrent) -} - -func (c *Conn) begin(ctx context.Context, tdsIsolation isoLevel) (tx driver.Tx, err error) { - if !c.connectionGood { - return nil, driver.ErrBadConn - } - err = c.sendBeginRequest(ctx, tdsIsolation) - if err != nil { - return nil, c.checkBadConn(err) - } - tx, err = c.processBeginResponse(ctx) - if err != nil { - return nil, c.checkBadConn(err) - } - return -} - -func (c *Conn) sendBeginRequest(ctx context.Context, tdsIsolation isoLevel) error { - c.transactionCtx = ctx +func (c *MssqlConn) Begin() (driver.Tx, error) { headers := []headerStruct{ {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{0, 1}.pack()}, } - reset := c.resetSession - c.resetSession = false - if err := sendBeginXact(c.sess.buf, headers, tdsIsolation, "", reset); err != nil { - if c.sess.logFlags&logErrors != 0 { - c.sess.log.Printf("Failed to send BeginXact with %v", err) - } - c.connectionGood = false - return fmt.Errorf("Failed to send BeginXact: %v", err) + if err := sendBeginXact(c.sess.buf, headers, 0, ""); err != nil { + return nil, CheckBadConn(err) } - return nil -} - -func (c *Conn) processBeginResponse(ctx context.Context) (driver.Tx, error) { - if err := c.simpleProcessResp(ctx); err != nil { - return nil, err + tokchan := make(chan tokenStruct, 5) + go processResponse(c.sess, tokchan) + for tok := range tokchan { + switch token := tok.(type) { + case error: + if c.sess.tranid != 0 { + return nil, token + } + return nil, CheckBadConn(token) + } } // successful BEGINXACT request will return sess.tranid // for started transaction return c, nil } -func (d *Driver) open(ctx context.Context, dsn string) (*Conn, error) { +func (d *MssqlDriver) Open(dsn string) (driver.Conn, error) { params, err := parseConnectParams(dsn) if err != nil { return nil, err } - return d.connect(ctx, nil, params) -} -// connect to the server, using the provided context for dialing only. 
-func (d *Driver) connect(ctx context.Context, c *Connector, params connectParams) (*Conn, error) { - sess, err := connect(ctx, c, d.log, params) + sess, err := connect(params) if err != nil { // main server failed, try fail-over partner if params.failOverPartner == "" { @@ -337,31 +128,24 @@ func (d *Driver) connect(ctx context.Context, c *Connector, params connectParams params.port = params.failOverPort } - sess, err = connect(ctx, c, d.log, params) + sess, err = connect(params) if err != nil { // fail-over partner also failed, now fail return nil, err } } - conn := &Conn{ - connector: c, - sess: sess, - transactionCtx: context.Background(), - processQueryText: d.processQueryText, - connectionGood: true, - } - conn.sess.log = d.log - + conn := &MssqlConn{sess} + conn.sess.log = (*Logger)(d.log) return conn, nil } -func (c *Conn) Close() error { +func (c *MssqlConn) Close() error { return c.sess.buf.transport.Close() } -type Stmt struct { - c *Conn +type MssqlStmt struct { + c *MssqlConn query string paramCount int notifSub *queryNotifSub @@ -373,29 +157,16 @@ type queryNotifSub struct { timeout uint32 } -func (c *Conn) Prepare(query string) (driver.Stmt, error) { - if !c.connectionGood { - return nil, driver.ErrBadConn - } - if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") { - return c.prepareCopyIn(context.Background(), query) - } - return c.prepareContext(context.Background(), query) +func (c *MssqlConn) Prepare(query string) (driver.Stmt, error) { + q, paramCount := parseParams(query) + return &MssqlStmt{c, q, paramCount, nil}, nil } -func (c *Conn) prepareContext(ctx context.Context, query string) (*Stmt, error) { - paramCount := -1 - if c.processQueryText { - query, paramCount = parseParams(query) - } - return &Stmt{c, query, paramCount, nil}, nil -} - -func (s *Stmt) Close() error { +func (s *MssqlStmt) Close() error { return nil } -func (s *Stmt) SetQueryNotification(id, options string, timeout time.Duration) { +func (s *MssqlStmt) SetQueryNotification(id, options string, timeout time.Duration) { to := uint32(timeout / time.Second) if to < 1 { to = 1 @@ -403,326 +174,183 @@ func (s *Stmt) SetQueryNotification(id, options string, timeout time.Duration) { s.notifSub = &queryNotifSub{id, options, to} } -func (s *Stmt) NumInput() int { +func (s *MssqlStmt) NumInput() int { return s.paramCount } -func (s *Stmt) sendQuery(args []namedValue) (err error) { +func (s *MssqlStmt) sendQuery(args []driver.Value) (err error) { headers := []headerStruct{ {hdrtype: dataStmHdrTransDescr, data: transDescrHdr{s.c.sess.tranid, 1}.pack()}, } if s.notifSub != nil { - headers = append(headers, - headerStruct{ - hdrtype: dataStmHdrQueryNotif, - data: queryNotifHdr{ - s.notifSub.msgText, - s.notifSub.options, - s.notifSub.timeout, - }.pack(), - }) + headers = append(headers, headerStruct{hdrtype: dataStmHdrQueryNotif, + data: queryNotifHdr{s.notifSub.msgText, s.notifSub.options, s.notifSub.timeout}.pack()}) } - conn := s.c - - // no need to check number of parameters here, it is checked by database/sql - if conn.sess.logFlags&logSQL != 0 { - conn.sess.log.Println(s.query) + if len(args) != s.paramCount { + return errors.New(fmt.Sprintf("sql: expected %d parameters, got %d", s.paramCount, len(args))) + } + if s.c.sess.logFlags&logSQL != 0 { + s.c.sess.log.Println(s.query) } - if conn.sess.logFlags&logParams != 0 && len(args) > 0 { + if s.c.sess.logFlags&logParams != 0 && len(args) > 0 { for i := 0; i < len(args); i++ { - if len(args[i].Name) > 0 { - s.c.sess.log.Printf("\t@%s\t%v\n", 
args[i].Name, args[i].Value) - } else { - s.c.sess.log.Printf("\t@p%d\t%v\n", i+1, args[i].Value) - } + s.c.sess.log.Printf("\t@p%d\t%v\n", i+1, args[i]) } - } - reset := conn.resetSession - conn.resetSession = false + } if len(args) == 0 { - if err = sendSqlBatch72(conn.sess.buf, s.query, headers, reset); err != nil { - if conn.sess.logFlags&logErrors != 0 { - conn.sess.log.Printf("Failed to send SqlBatch with %v", err) + if err = sendSqlBatch72(s.c.sess.buf, s.query, headers); err != nil { + if s.c.sess.tranid != 0 { + return err } - conn.connectionGood = false - return fmt.Errorf("failed to send SQL Batch: %v", err) + return CheckBadConn(err) } } else { - proc := sp_ExecuteSql - var params []param - if isProc(s.query) { - proc.name = s.query - params, _, err = s.makeRPCParams(args, 0) - if err != nil { - return - } - } else { - var decls []string - params, decls, err = s.makeRPCParams(args, 2) + params := make([]Param, len(args)+2) + decls := make([]string, len(args)) + params[0], err = s.makeParam(s.query) + if err != nil { + return + } + for i, val := range args { + params[i+2], err = s.makeParam(val) if err != nil { return } - params[0] = makeStrParam(s.query) - params[1] = makeStrParam(strings.Join(decls, ",")) - } - if err = sendRpc(conn.sess.buf, headers, proc, 0, params, reset); err != nil { - if conn.sess.logFlags&logErrors != 0 { - conn.sess.log.Printf("Failed to send Rpc with %v", err) - } - conn.connectionGood = false - return fmt.Errorf("Failed to send RPC: %v", err) - } - } - return -} - -// isProc takes the query text in s and determines if it is a stored proc name -// or SQL text. -func isProc(s string) bool { - if len(s) == 0 { - return false - } - const ( - outside = iota - text - escaped - ) - st := outside - var rn1, rPrev rune - for _, r := range s { - rPrev = rn1 - rn1 = r - switch r { - // No newlines or string sequences. 
- case '\n', '\r', '\'', ';': - return false - } - switch st { - case outside: - switch { - case unicode.IsSpace(r): - return false - case r == '[': - st = escaped - continue - case r == ']' && rPrev == ']': - st = escaped - continue - case unicode.IsLetter(r): - st = text - } - case text: - switch { - case r == '.': - st = outside - continue - case unicode.IsSpace(r): - return false - } - case escaped: - switch { - case r == ']': - st = outside - continue - } + name := fmt.Sprintf("@p%d", i+1) + params[i+2].Name = name + decls[i] = fmt.Sprintf("%s %s", name, makeDecl(params[i+2].ti)) } - } - return true -} - -func (s *Stmt) makeRPCParams(args []namedValue, offset int) ([]param, []string, error) { - var err error - params := make([]param, len(args)+offset) - decls := make([]string, len(args)) - for i, val := range args { - params[i+offset], err = s.makeParam(val.Value) + params[1], err = s.makeParam(strings.Join(decls, ",")) if err != nil { - return nil, nil, err - } - var name string - if len(val.Name) > 0 { - name = "@" + val.Name - } else { - name = fmt.Sprintf("@p%d", val.Ordinal) + return } - params[i+offset].Name = name - decls[i] = fmt.Sprintf("%s %s", name, makeDecl(params[i+offset].ti)) - } - return params, decls, nil -} - -type namedValue struct { - Name string - Ordinal int - Value driver.Value -} - -func convertOldArgs(args []driver.Value) []namedValue { - list := make([]namedValue, len(args)) - for i, v := range args { - list[i] = namedValue{ - Ordinal: i + 1, - Value: v, + if err = sendRpc(s.c.sess.buf, headers, Sp_ExecuteSql, 0, params); err != nil { + if s.c.sess.tranid != 0 { + return err + } + return CheckBadConn(err) } } - return list -} - -func (s *Stmt) Query(args []driver.Value) (driver.Rows, error) { - return s.queryContext(context.Background(), convertOldArgs(args)) + return } -func (s *Stmt) queryContext(ctx context.Context, args []namedValue) (rows driver.Rows, err error) { - if !s.c.connectionGood { - return nil, driver.ErrBadConn - } +func (s *MssqlStmt) Query(args []driver.Value) (res driver.Rows, err error) { if err = s.sendQuery(args); err != nil { - return nil, s.c.checkBadConn(err) + return } - return s.processQueryResponse(ctx) -} - -func (s *Stmt) processQueryResponse(ctx context.Context) (res driver.Rows, err error) { tokchan := make(chan tokenStruct, 5) - ctx, cancel := context.WithCancel(ctx) - go processResponse(ctx, s.c.sess, tokchan, s.c.outs) - s.c.clearOuts() + go processResponse(s.c.sess, tokchan) // process metadata - var cols []columnStruct + var cols []string loop: for tok := range tokchan { switch token := tok.(type) { - // By ignoring DONE token we effectively - // skip empty result-sets. 
- // This improves results in queries like that: + // by ignoring DONE token we effectively + // skip empty result-sets + // this improves results in queryes like that: // set nocount on; select 1 // see TestIgnoreEmptyResults test //case doneStruct: //break loop case []columnStruct: - cols = token - break loop - case doneStruct: - if token.isError() { - return nil, s.c.checkBadConn(token.getError()) + cols = make([]string, len(token)) + for i, col := range token { + cols[i] = col.ColName } - case ReturnStatus: - s.c.setReturnStatus(token) + break loop case error: - return nil, s.c.checkBadConn(token) + if s.c.sess.tranid != 0 { + return nil, token + } + return nil, CheckBadConn(token) } } - res = &Rows{stmt: s, tokchan: tokchan, cols: cols, cancel: cancel} - return + return &MssqlRows{sess: s.c.sess, tokchan: tokchan, cols: cols}, nil } -func (s *Stmt) Exec(args []driver.Value) (driver.Result, error) { - return s.exec(context.Background(), convertOldArgs(args)) -} - -func (s *Stmt) exec(ctx context.Context, args []namedValue) (res driver.Result, err error) { - if !s.c.connectionGood { - return nil, driver.ErrBadConn - } +func (s *MssqlStmt) Exec(args []driver.Value) (res driver.Result, err error) { if err = s.sendQuery(args); err != nil { - return nil, s.c.checkBadConn(err) - } - if res, err = s.processExec(ctx); err != nil { - return nil, s.c.checkBadConn(err) + return } - return -} - -func (s *Stmt) processExec(ctx context.Context) (res driver.Result, err error) { tokchan := make(chan tokenStruct, 5) - go processResponse(ctx, s.c.sess, tokchan, s.c.outs) - s.c.clearOuts() + go processResponse(s.c.sess, tokchan) var rowCount int64 for token := range tokchan { switch token := token.(type) { case doneInProcStruct: if token.Status&doneCount != 0 { - rowCount += int64(token.RowCount) + rowCount = int64(token.RowCount) } case doneStruct: if token.Status&doneCount != 0 { - rowCount += int64(token.RowCount) - } - if token.isError() { - return nil, token.getError() + rowCount = int64(token.RowCount) } - case ReturnStatus: - s.c.setReturnStatus(token) case error: - return nil, token + if s.c.sess.logFlags&logErrors != 0 { + s.c.sess.log.Println("got error:", token) + } + if s.c.sess.tranid != 0 { + return nil, token + } + return nil, CheckBadConn(token) } } - return &Result{s.c, rowCount}, nil + return &MssqlResult{s.c, rowCount}, nil } -type Rows struct { - stmt *Stmt - cols []columnStruct +type MssqlRows struct { + sess *tdsSession + cols []string tokchan chan tokenStruct - nextCols []columnStruct - - cancel func() + nextCols []string } -func (rc *Rows) Close() error { - rc.cancel() +func (rc *MssqlRows) Close() error { for _ = range rc.tokchan { } rc.tokchan = nil return nil } -func (rc *Rows) Columns() (res []string) { - res = make([]string, len(rc.cols)) - for i, col := range rc.cols { - res[i] = col.ColName - } - return +func (rc *MssqlRows) Columns() (res []string) { + return rc.cols } -func (rc *Rows) Next(dest []driver.Value) error { - if !rc.stmt.c.connectionGood { - return driver.ErrBadConn - } +func (rc *MssqlRows) Next(dest []driver.Value) (err error) { if rc.nextCols != nil { return io.EOF } for tok := range rc.tokchan { switch tokdata := tok.(type) { case []columnStruct: - rc.nextCols = tokdata + cols := make([]string, len(tokdata)) + for i, col := range tokdata { + cols[i] = col.ColName + } + rc.nextCols = cols return io.EOF case []interface{}: for i := range dest { dest[i] = tokdata[i] } return nil - case doneStruct: - if tokdata.isError() { - return 
rc.stmt.c.checkBadConn(tokdata.getError()) - } case error: - return rc.stmt.c.checkBadConn(tokdata) + return tokdata } } return io.EOF } -func (rc *Rows) HasNextResultSet() bool { +func (rc *MssqlRows) HasNextResultSet() bool { return rc.nextCols != nil } -func (rc *Rows) NextResultSet() error { +func (rc *MssqlRows) NextResultSet() error { rc.cols = rc.nextCols rc.nextCols = nil if rc.cols == nil { @@ -731,69 +359,11 @@ func (rc *Rows) NextResultSet() error { return nil } -// It should return -// the value type that can be used to scan types into. For example, the database -// column type "bigint" this should return "reflect.TypeOf(int64(0))". -func (r *Rows) ColumnTypeScanType(index int) reflect.Type { - return makeGoLangScanType(r.cols[index].ti) -} - -// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the -// database system type name without the length. Type names should be uppercase. -// Examples of returned types: "VARCHAR", "NVARCHAR", "VARCHAR2", "CHAR", "TEXT", -// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML", -// "TIMESTAMP". -func (r *Rows) ColumnTypeDatabaseTypeName(index int) string { - return makeGoLangTypeName(r.cols[index].ti) -} - -// RowsColumnTypeLength may be implemented by Rows. It should return the length -// of the column type if the column is a variable length type. If the column is -// not a variable length type ok should return false. -// If length is not limited other than system limits, it should return math.MaxInt64. -// The following are examples of returned values for various types: -// TEXT (math.MaxInt64, true) -// varchar(10) (10, true) -// nvarchar(10) (10, true) -// decimal (0, false) -// int (0, false) -// bytea(30) (30, true) -func (r *Rows) ColumnTypeLength(index int) (int64, bool) { - return makeGoLangTypeLength(r.cols[index].ti) -} - -// It should return -// the precision and scale for decimal types. If not applicable, ok should be false. -// The following are examples of returned values for various types: -// decimal(38, 4) (38, 4, true) -// int (0, 0, false) -// decimal (math.MaxInt64, math.MaxInt64, true) -func (r *Rows) ColumnTypePrecisionScale(index int) (int64, int64, bool) { - return makeGoLangTypePrecisionScale(r.cols[index].ti) -} - -// The nullable value should -// be true if it is known the column may be null, or false if the column is known -// to be not nullable. -// If the column nullability is unknown, ok should be false. 
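[editor's illustration, not part of the patch] HasNextResultSet and NextResultSet above surface multi-statement batches through database/sql's Rows.NextResultSet. A small calling-side sketch, assuming a *sql.DB opened with this driver; the batch text is made up:

func dumpBatch(db *sql.DB) error {
	rows, err := db.Query("select 1; select 2, 3;") // illustrative batch
	if err != nil {
		return err
	}
	defer rows.Close()
	for {
		for rows.Next() {
			// Scan the current result set's columns here.
		}
		// NextResultSet advances using the driver's HasNextResultSet /
		// NextResultSet pair shown in the hunk above.
		if !rows.NextResultSet() {
			break
		}
	}
	return rows.Err()
}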
-func (r *Rows) ColumnTypeNullable(index int) (nullable, ok bool) { - nullable = r.cols[index].Flags&colFlagNullable != 0 - ok = true - return -} - -func makeStrParam(val string) (res param) { - res.ti.TypeId = typeNVarChar - res.buffer = str2ucs2(val) - res.ti.Size = len(res.buffer) - return -} - -func (s *Stmt) makeParam(val driver.Value) (res param, err error) { +func (s *MssqlStmt) makeParam(val driver.Value) (res Param, err error) { if val == nil { - res.ti.TypeId = typeNull + res.ti.TypeId = typeNVarChar res.buffer = nil - res.ti.Size = 0 + res.ti.Size = 2 return } switch val := val.(type) { @@ -802,34 +372,19 @@ func (s *Stmt) makeParam(val driver.Value) (res param, err error) { res.buffer = make([]byte, 8) res.ti.Size = 8 binary.LittleEndian.PutUint64(res.buffer, uint64(val)) - case sql.NullInt64: - // only null values should be getting here - res.ti.TypeId = typeIntN - res.ti.Size = 8 - res.buffer = []byte{} - case float64: res.ti.TypeId = typeFltN res.ti.Size = 8 res.buffer = make([]byte, 8) binary.LittleEndian.PutUint64(res.buffer, math.Float64bits(val)) - case sql.NullFloat64: - // only null values should be getting here - res.ti.TypeId = typeFltN - res.ti.Size = 8 - res.buffer = []byte{} - case []byte: res.ti.TypeId = typeBigVarBin res.ti.Size = len(val) res.buffer = val case string: - res = makeStrParam(val) - case sql.NullString: - // only null values should be getting here res.ti.TypeId = typeNVarChar - res.buffer = nil - res.ti.Size = 8000 + res.buffer = str2ucs2(val) + res.ti.Size = len(res.buffer) case bool: res.ti.TypeId = typeBitN res.ti.Size = 1 @@ -837,39 +392,55 @@ func (s *Stmt) makeParam(val driver.Value) (res param, err error) { if val { res.buffer[0] = 1 } - case sql.NullBool: - // only null values should be getting here - res.ti.TypeId = typeBitN - res.ti.Size = 1 - res.buffer = []byte{} - case time.Time: if s.c.sess.loginAck.TDSVersion >= verTDS73 { res.ti.TypeId = typeDateTimeOffsetN res.ti.Scale = 7 - res.buffer = encodeDateTimeOffset(val, int(res.ti.Scale)) - res.ti.Size = len(res.buffer) + res.ti.Size = 10 + buf := make([]byte, 10) + res.buffer = buf + days, ns := dateTime2(val) + ns /= 100 + buf[0] = byte(ns) + buf[1] = byte(ns >> 8) + buf[2] = byte(ns >> 16) + buf[3] = byte(ns >> 24) + buf[4] = byte(ns >> 32) + buf[5] = byte(days) + buf[6] = byte(days >> 8) + buf[7] = byte(days >> 16) + _, offset := val.Zone() + offset /= 60 + buf[8] = byte(offset) + buf[9] = byte(offset >> 8) } else { res.ti.TypeId = typeDateTimeN - res.buffer = encodeDateTime(val) - res.ti.Size = len(res.buffer) + res.ti.Size = 8 + res.buffer = make([]byte, 8) + ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) + dur := val.Sub(ref) + days := dur / (24 * time.Hour) + tm := (300 * (dur % (24 * time.Hour))) / time.Second + binary.LittleEndian.PutUint32(res.buffer[0:4], uint32(days)) + binary.LittleEndian.PutUint32(res.buffer[4:8], uint32(tm)) } default: - return s.makeParamExtra(val) + err = fmt.Errorf("mssql: unknown type for %T", val) + return } return } -type Result struct { - c *Conn +type MssqlResult struct { + c *MssqlConn rowsAffected int64 } -func (r *Result) RowsAffected() (int64, error) { +func (r *MssqlResult) RowsAffected() (int64, error) { return r.rowsAffected, nil } -func (r *Result) LastInsertId() (int64, error) { +func (r *MssqlResult) LastInsertId() (int64, error) { s, err := r.c.Prepare("select cast(@@identity as bigint)") if err != nil { return 0, err @@ -891,83 +462,3 @@ func (r *Result) LastInsertId() (int64, error) { lastInsertId := dest[0].(int64) return 
lastInsertId, nil } - -var _ driver.Pinger = &Conn{} - -// Ping is used to check if the remote server is available and satisfies the Pinger interface. -func (c *Conn) Ping(ctx context.Context) error { - if !c.connectionGood { - return driver.ErrBadConn - } - stmt := &Stmt{c, `select 1;`, 0, nil} - _, err := stmt.ExecContext(ctx, nil) - return err -} - -var _ driver.ConnBeginTx = &Conn{} - -// BeginTx satisfies ConnBeginTx. -func (c *Conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { - if !c.connectionGood { - return nil, driver.ErrBadConn - } - if opts.ReadOnly { - return nil, errors.New("Read-only transactions are not supported") - } - - var tdsIsolation isoLevel - switch sql.IsolationLevel(opts.Isolation) { - case sql.LevelDefault: - tdsIsolation = isolationUseCurrent - case sql.LevelReadUncommitted: - tdsIsolation = isolationReadUncommited - case sql.LevelReadCommitted: - tdsIsolation = isolationReadCommited - case sql.LevelWriteCommitted: - return nil, errors.New("LevelWriteCommitted isolation level is not supported") - case sql.LevelRepeatableRead: - tdsIsolation = isolationRepeatableRead - case sql.LevelSnapshot: - tdsIsolation = isolationSnapshot - case sql.LevelSerializable: - tdsIsolation = isolationSerializable - case sql.LevelLinearizable: - return nil, errors.New("LevelLinearizable isolation level is not supported") - default: - return nil, errors.New("Isolation level is not supported or unknown") - } - return c.begin(ctx, tdsIsolation) -} - -func (c *Conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { - if !c.connectionGood { - return nil, driver.ErrBadConn - } - if len(query) > 10 && strings.EqualFold(query[:10], "INSERTBULK") { - return c.prepareCopyIn(ctx, query) - } - - return c.prepareContext(ctx, query) -} - -func (s *Stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { - if !s.c.connectionGood { - return nil, driver.ErrBadConn - } - list := make([]namedValue, len(args)) - for i, nv := range args { - list[i] = namedValue(nv) - } - return s.queryContext(ctx, list) -} - -func (s *Stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { - if !s.c.connectionGood { - return nil, driver.ErrBadConn - } - list := make([]namedValue, len(args)) - for i, nv := range args { - list[i] = namedValue(nv) - } - return s.exec(ctx, list) -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go new file mode 100644 index 0000000000000..b8cffe9c01fe4 --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go @@ -0,0 +1,11 @@ +// +build go1.3 + +package mssql + +import ( + "net" +) + +func createDialer(p connectParams) *net.Dialer { + return &net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive} +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go new file mode 100644 index 0000000000000..3c7e72716d14c --- /dev/null +++ b/vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go @@ -0,0 +1,11 @@ +// +build !go1.3 + +package mssql + +import ( + "net" +) + +func createDialer(p *connectParams) *net.Dialer { + return &net.Dialer{Timeout: p.dial_timeout} +} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go deleted file mode 100644 index 833f047163778..0000000000000 --- 
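[editor's illustration, not part of the patch] The BeginTx hunk above maps database/sql isolation levels onto TDS isolation levels and rejects the unsupported ones. From the caller's side that looks like the following sketch; the table and column names are invented:

func bumpCounter(ctx context.Context, db *sql.DB) error {
	// LevelSnapshot maps to isolationSnapshot in the switch above;
	// read-only transactions would be rejected by that code path.
	tx, err := db.BeginTx(ctx, &sql.TxOptions{Isolation: sql.LevelSnapshot})
	if err != nil {
		return err
	}
	defer tx.Rollback() // harmless no-op after a successful Commit
	if _, err := tx.ExecContext(ctx, "update counters set n = n + 1 where id = @p1", 42); err != nil {
		return err
	}
	return tx.Commit()
}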
a/vendor/github.com/denisenkom/go-mssqldb/mssql_go110.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build go1.10 - -package mssql - -import ( - "context" - "database/sql/driver" -) - -var _ driver.Connector = &Connector{} -var _ driver.SessionResetter = &Conn{} - -func (c *Conn) ResetSession(ctx context.Context) error { - if !c.connectionGood { - return driver.ErrBadConn - } - c.resetSession = true - - if c.connector == nil || len(c.connector.SessionInitSQL) == 0 { - return nil - } - - s, err := c.prepareContext(ctx, c.connector.SessionInitSQL) - if err != nil { - return driver.ErrBadConn - } - _, err = s.exec(ctx, nil) - if err != nil { - return driver.ErrBadConn - } - - return nil -} - -// Connect to the server and return a TDS connection. -func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { - conn, err := c.driver.connect(ctx, c, c.params) - if err == nil { - err = conn.ResetSession(ctx) - } - return conn, err -} - -// Driver underlying the Connector. -func (c *Connector) Driver() driver.Driver { - return c.driver -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go deleted file mode 100644 index 65a11720da21a..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19.go +++ /dev/null @@ -1,171 +0,0 @@ -// +build go1.9 - -package mssql - -import ( - "database/sql" - "database/sql/driver" - "errors" - "fmt" - "reflect" - "time" - - // "github.com/cockroachdb/apd" - "cloud.google.com/go/civil" -) - -// Type alias provided for compatibility. - -type MssqlDriver = Driver // Deprecated: users should transition to the new name when possible. -type MssqlBulk = Bulk // Deprecated: users should transition to the new name when possible. -type MssqlBulkOptions = BulkOptions // Deprecated: users should transition to the new name when possible. -type MssqlConn = Conn // Deprecated: users should transition to the new name when possible. -type MssqlResult = Result // Deprecated: users should transition to the new name when possible. -type MssqlRows = Rows // Deprecated: users should transition to the new name when possible. -type MssqlStmt = Stmt // Deprecated: users should transition to the new name when possible. - -var _ driver.NamedValueChecker = &Conn{} - -// VarChar parameter types. -type VarChar string - -type NVarCharMax string -type VarCharMax string - -// DateTime1 encodes parameters to original DateTime SQL types. -type DateTime1 time.Time - -// DateTimeOffset encodes parameters to DateTimeOffset, preserving the UTC offset. 
-type DateTimeOffset time.Time - -func convertInputParameter(val interface{}) (interface{}, error) { - switch v := val.(type) { - case VarChar: - return val, nil - case NVarCharMax: - return val, nil - case VarCharMax: - return val, nil - case DateTime1: - return val, nil - case DateTimeOffset: - return val, nil - case civil.Date: - return val, nil - case civil.DateTime: - return val, nil - case civil.Time: - return val, nil - // case *apd.Decimal: - // return nil - default: - return driver.DefaultParameterConverter.ConvertValue(v) - } -} - -func (c *Conn) CheckNamedValue(nv *driver.NamedValue) error { - switch v := nv.Value.(type) { - case sql.Out: - if c.outs == nil { - c.outs = make(map[string]interface{}) - } - c.outs[nv.Name] = v.Dest - - if v.Dest == nil { - return errors.New("destination is a nil pointer") - } - - dest_info := reflect.ValueOf(v.Dest) - if dest_info.Kind() != reflect.Ptr { - return errors.New("destination not a pointer") - } - - if dest_info.IsNil() { - return errors.New("destination is a nil pointer") - } - - pointed_value := reflect.Indirect(dest_info) - - // don't allow pointer to a pointer, only pointer to a value can be handled - // correctly - if pointed_value.Kind() == reflect.Ptr { - return errors.New("destination is a pointer to a pointer") - } - - // Unwrap the Out value and check the inner value. - val := pointed_value.Interface() - if val == nil { - return errors.New("MSSQL does not allow NULL value without type for OUTPUT parameters") - } - conv, err := convertInputParameter(val) - if err != nil { - return err - } - if conv == nil { - // if we replace with nil we would lose type information - nv.Value = sql.Out{Dest: val} - } else { - nv.Value = sql.Out{Dest: conv} - } - return nil - case *ReturnStatus: - *v = 0 // By default the return value should be zero. 
- c.returnStatus = v - return driver.ErrRemoveArgument - default: - var err error - nv.Value, err = convertInputParameter(nv.Value) - return err - } -} - -func (s *Stmt) makeParamExtra(val driver.Value) (res param, err error) { - switch val := val.(type) { - case VarChar: - res.ti.TypeId = typeBigVarChar - res.buffer = []byte(val) - res.ti.Size = len(res.buffer) - case VarCharMax: - res.ti.TypeId = typeBigVarChar - res.buffer = []byte(val) - res.ti.Size = 0 // currently zero forces varchar(max) - case NVarCharMax: - res.ti.TypeId = typeNVarChar - res.buffer = str2ucs2(string(val)) - res.ti.Size = 0 // currently zero forces nvarchar(max) - case DateTime1: - t := time.Time(val) - res.ti.TypeId = typeDateTimeN - res.buffer = encodeDateTime(t) - res.ti.Size = len(res.buffer) - case DateTimeOffset: - res.ti.TypeId = typeDateTimeOffsetN - res.ti.Scale = 7 - res.buffer = encodeDateTimeOffset(time.Time(val), int(res.ti.Scale)) - res.ti.Size = len(res.buffer) - case civil.Date: - res.ti.TypeId = typeDateN - res.buffer = encodeDate(val.In(time.UTC)) - res.ti.Size = len(res.buffer) - case civil.DateTime: - res.ti.TypeId = typeDateTime2N - res.ti.Scale = 7 - res.buffer = encodeDateTime2(val.In(time.UTC), int(res.ti.Scale)) - res.ti.Size = len(res.buffer) - case civil.Time: - res.ti.TypeId = typeTimeN - res.ti.Scale = 7 - res.buffer = encodeTime(val.Hour, val.Minute, val.Second, val.Nanosecond, int(res.ti.Scale)) - res.ti.Size = len(res.buffer) - case sql.Out: - res, err = s.makeParam(val.Dest) - res.Flags = fByRevValue - default: - err = fmt.Errorf("mssql: unknown type for %T", val) - } - return -} - -func scanIntoOut(name string, fromServer, scanInto interface{}) error { - return convertAssign(scanInto, fromServer) -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go b/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go deleted file mode 100644 index 9680f5107e0d2..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/mssql_go19pre.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !go1.9 - -package mssql - -import ( - "database/sql/driver" - "fmt" -) - -func (s *Stmt) makeParamExtra(val driver.Value) (param, error) { - return param{}, fmt.Errorf("mssql: unknown type for %T", val) -} - -func scanIntoOut(name string, fromServer, scanInto interface{}) error { - return fmt.Errorf("mssql: unsupported OUTPUT type, use a newer Go version") -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/net.go b/vendor/github.com/denisenkom/go-mssqldb/net.go index e3864d1a22245..72a87340db5fd 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/net.go +++ b/vendor/github.com/denisenkom/go-mssqldb/net.go @@ -14,7 +14,7 @@ type timeoutConn struct { continueRead bool } -func newTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn { +func NewTimeoutConn(conn net.Conn, timeout time.Duration) *timeoutConn { return &timeoutConn{ c: conn, timeout: timeout, @@ -33,7 +33,7 @@ func (c *timeoutConn) Read(b []byte) (n int, err error) { c.continueRead = false } if !c.continueRead { - var packet packetType + var packet uint8 packet, err = c.buf.BeginRead() if err != nil { err = fmt.Errorf("Cannot read handshake packet: %s", err.Error()) @@ -48,11 +48,9 @@ func (c *timeoutConn) Read(b []byte) (n int, err error) { n, err = c.buf.Read(b) return } - if c.timeout > 0 { - err = c.c.SetDeadline(time.Now().Add(c.timeout)) - if err != nil { - return - } + err = c.c.SetDeadline(time.Now().Add(c.timeout)) + if err != nil { + return } return c.c.Read(b) } @@ -60,7 +58,7 @@ 
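[editor's illustration, not part of the patch] CheckNamedValue and makeParamExtra in the hunks above (again on the side this patch removes) are what make sql.Out OUTPUT parameters and the *ReturnStatus sentinel work. A hedged calling-side sketch; the stored procedure and its parameter names are hypothetical:

func callProc(ctx context.Context, db *sql.DB) error {
	var msg string            // filled from the OUTPUT parameter via sql.Out
	var rs mssql.ReturnStatus // filled with the procedure's RETURN value
	// "dbo.MyProc" and its parameters are invented for illustration.
	_, err := db.ExecContext(ctx, "dbo.MyProc",
		sql.Named("id", 7),
		sql.Named("msg", sql.Out{Dest: &msg}),
		&rs,
	)
	if err != nil {
		return err
	}
	log.Printf("msg=%q return status=%d", msg, rs)
	return nil
}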
func (c *timeoutConn) Read(b []byte) (n int, err error) { func (c *timeoutConn) Write(b []byte) (n int, err error) { if c.buf != nil { if !c.packetPending { - c.buf.BeginPacket(packPrelogin, false) + c.buf.BeginPacket(packPrelogin) c.packetPending = true } n, err = c.buf.Write(b) @@ -69,11 +67,9 @@ func (c *timeoutConn) Write(b []byte) (n int, err error) { } return } - if c.timeout > 0 { - err = c.c.SetDeadline(time.Now().Add(c.timeout)) - if err != nil { - return - } + err = c.c.SetDeadline(time.Now().Add(c.timeout)) + if err != nil { + return } return c.c.Write(b) } diff --git a/vendor/github.com/denisenkom/go-mssqldb/ntlm.go b/vendor/github.com/denisenkom/go-mssqldb/ntlm.go index 7c0cc4f785c39..f853435c6ebdc 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/ntlm.go +++ b/vendor/github.com/denisenkom/go-mssqldb/ntlm.go @@ -15,56 +15,56 @@ import ( ) const ( - _NEGOTIATE_MESSAGE = 1 - _CHALLENGE_MESSAGE = 2 - _AUTHENTICATE_MESSAGE = 3 + NEGOTIATE_MESSAGE = 1 + CHALLENGE_MESSAGE = 2 + AUTHENTICATE_MESSAGE = 3 ) const ( - _NEGOTIATE_UNICODE = 0x00000001 - _NEGOTIATE_OEM = 0x00000002 - _NEGOTIATE_TARGET = 0x00000004 - _NEGOTIATE_SIGN = 0x00000010 - _NEGOTIATE_SEAL = 0x00000020 - _NEGOTIATE_DATAGRAM = 0x00000040 - _NEGOTIATE_LMKEY = 0x00000080 - _NEGOTIATE_NTLM = 0x00000200 - _NEGOTIATE_ANONYMOUS = 0x00000800 - _NEGOTIATE_OEM_DOMAIN_SUPPLIED = 0x00001000 - _NEGOTIATE_OEM_WORKSTATION_SUPPLIED = 0x00002000 - _NEGOTIATE_ALWAYS_SIGN = 0x00008000 - _NEGOTIATE_TARGET_TYPE_DOMAIN = 0x00010000 - _NEGOTIATE_TARGET_TYPE_SERVER = 0x00020000 - _NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000 - _NEGOTIATE_IDENTIFY = 0x00100000 - _REQUEST_NON_NT_SESSION_KEY = 0x00400000 - _NEGOTIATE_TARGET_INFO = 0x00800000 - _NEGOTIATE_VERSION = 0x02000000 - _NEGOTIATE_128 = 0x20000000 - _NEGOTIATE_KEY_EXCH = 0x40000000 - _NEGOTIATE_56 = 0x80000000 + NEGOTIATE_UNICODE = 0x00000001 + NEGOTIATE_OEM = 0x00000002 + NEGOTIATE_TARGET = 0x00000004 + NEGOTIATE_SIGN = 0x00000010 + NEGOTIATE_SEAL = 0x00000020 + NEGOTIATE_DATAGRAM = 0x00000040 + NEGOTIATE_LMKEY = 0x00000080 + NEGOTIATE_NTLM = 0x00000200 + NEGOTIATE_ANONYMOUS = 0x00000800 + NEGOTIATE_OEM_DOMAIN_SUPPLIED = 0x00001000 + NEGOTIATE_OEM_WORKSTATION_SUPPLIED = 0x00002000 + NEGOTIATE_ALWAYS_SIGN = 0x00008000 + NEGOTIATE_TARGET_TYPE_DOMAIN = 0x00010000 + NEGOTIATE_TARGET_TYPE_SERVER = 0x00020000 + NEGOTIATE_EXTENDED_SESSIONSECURITY = 0x00080000 + NEGOTIATE_IDENTIFY = 0x00100000 + REQUEST_NON_NT_SESSION_KEY = 0x00400000 + NEGOTIATE_TARGET_INFO = 0x00800000 + NEGOTIATE_VERSION = 0x02000000 + NEGOTIATE_128 = 0x20000000 + NEGOTIATE_KEY_EXCH = 0x40000000 + NEGOTIATE_56 = 0x80000000 ) -const _NEGOTIATE_FLAGS = _NEGOTIATE_UNICODE | - _NEGOTIATE_NTLM | - _NEGOTIATE_OEM_DOMAIN_SUPPLIED | - _NEGOTIATE_OEM_WORKSTATION_SUPPLIED | - _NEGOTIATE_ALWAYS_SIGN | - _NEGOTIATE_EXTENDED_SESSIONSECURITY +const NEGOTIATE_FLAGS = NEGOTIATE_UNICODE | + NEGOTIATE_NTLM | + NEGOTIATE_OEM_DOMAIN_SUPPLIED | + NEGOTIATE_OEM_WORKSTATION_SUPPLIED | + NEGOTIATE_ALWAYS_SIGN | + NEGOTIATE_EXTENDED_SESSIONSECURITY -type ntlmAuth struct { +type NTLMAuth struct { Domain string UserName string Password string Workstation string } -func getAuth(user, password, service, workstation string) (auth, bool) { +func getAuth(user, password, service, workstation string) (Auth, bool) { if !strings.ContainsRune(user, '\\') { return nil, false } domain_user := strings.SplitN(user, "\\", 2) - return &ntlmAuth{ + return &NTLMAuth{ Domain: domain_user[0], UserName: domain_user[1], Password: password, @@ 
-86,13 +86,13 @@ func utf16le(val string) []byte { return v } -func (auth *ntlmAuth) InitialBytes() ([]byte, error) { +func (auth *NTLMAuth) InitialBytes() ([]byte, error) { domain_len := len(auth.Domain) workstation_len := len(auth.Workstation) msg := make([]byte, 40+domain_len+workstation_len) copy(msg, []byte("NTLMSSP\x00")) - binary.LittleEndian.PutUint32(msg[8:], _NEGOTIATE_MESSAGE) - binary.LittleEndian.PutUint32(msg[12:], _NEGOTIATE_FLAGS) + binary.LittleEndian.PutUint32(msg[8:], NEGOTIATE_MESSAGE) + binary.LittleEndian.PutUint32(msg[12:], NEGOTIATE_FLAGS) // Domain Name Fields binary.LittleEndian.PutUint16(msg[16:], uint16(domain_len)) binary.LittleEndian.PutUint16(msg[18:], uint16(domain_len)) @@ -198,11 +198,11 @@ func ntlmSessionResponse(clientNonce [8]byte, serverChallenge [8]byte, password return response(hash, passwordHash) } -func (auth *ntlmAuth) NextBytes(bytes []byte) ([]byte, error) { +func (auth *NTLMAuth) NextBytes(bytes []byte) ([]byte, error) { if string(bytes[0:8]) != "NTLMSSP\x00" { return nil, errorNTLM } - if binary.LittleEndian.Uint32(bytes[8:12]) != _CHALLENGE_MESSAGE { + if binary.LittleEndian.Uint32(bytes[8:12]) != CHALLENGE_MESSAGE { return nil, errorNTLM } flags := binary.LittleEndian.Uint32(bytes[20:24]) @@ -210,7 +210,7 @@ func (auth *ntlmAuth) NextBytes(bytes []byte) ([]byte, error) { copy(challenge[:], bytes[24:32]) var lm, nt []byte - if (flags & _NEGOTIATE_EXTENDED_SESSIONSECURITY) != 0 { + if (flags & NEGOTIATE_EXTENDED_SESSIONSECURITY) != 0 { nonce := clientChallenge() var lm_bytes [24]byte copy(lm_bytes[:8], nonce[:]) @@ -235,7 +235,7 @@ func (auth *ntlmAuth) NextBytes(bytes []byte) ([]byte, error) { msg := make([]byte, 88+lm_len+nt_len+domain_len+user_len+workstation_len) copy(msg, []byte("NTLMSSP\x00")) - binary.LittleEndian.PutUint32(msg[8:], _AUTHENTICATE_MESSAGE) + binary.LittleEndian.PutUint32(msg[8:], AUTHENTICATE_MESSAGE) // Lm Challenge Response Fields binary.LittleEndian.PutUint16(msg[12:], uint16(lm_len)) binary.LittleEndian.PutUint16(msg[14:], uint16(lm_len)) @@ -279,5 +279,5 @@ func (auth *ntlmAuth) NextBytes(bytes []byte) ([]byte, error) { return msg, nil } -func (auth *ntlmAuth) Free() { +func (auth *NTLMAuth) Free() { } diff --git a/vendor/github.com/denisenkom/go-mssqldb/parser.go b/vendor/github.com/denisenkom/go-mssqldb/parser.go index 8021ca603c954..9e37c16a655c1 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/parser.go +++ b/vendor/github.com/denisenkom/go-mssqldb/parser.go @@ -11,9 +11,6 @@ type parser struct { w bytes.Buffer paramCount int paramMax int - - // using map as a set - namedParams map[string]bool } func (p *parser) next() (rune, bool) { @@ -42,14 +39,13 @@ type stateFunc func(*parser) stateFunc func parseParams(query string) (string, int) { p := &parser{ - r: bytes.NewReader([]byte(query)), - namedParams: map[string]bool{}, + r: bytes.NewReader([]byte(query)), } state := parseNormal for state != nil { state = state(p) } - return p.w.String(), p.paramMax + len(p.namedParams) + return p.w.String(), p.paramMax } func parseNormal(p *parser) stateFunc { @@ -59,7 +55,7 @@ func parseNormal(p *parser) stateFunc { return nil } if ch == '?' 
{ - return parseOrdinalParameter + return parseParameter } else if ch == '$' || ch == ':' { ch2, ok := p.next() if !ok { @@ -68,9 +64,7 @@ func parseNormal(p *parser) stateFunc { } p.unread() if ch2 >= '0' && ch2 <= '9' { - return parseOrdinalParameter - } else if 'a' <= ch2 && ch2 <= 'z' || 'A' <= ch2 && ch2 <= 'Z' { - return parseNamedParameter + return parseParameter } } p.write(ch) @@ -89,7 +83,7 @@ func parseNormal(p *parser) stateFunc { } } -func parseOrdinalParameter(p *parser) stateFunc { +func parseParameter(p *parser) stateFunc { var paramN int var ok bool for { @@ -119,30 +113,6 @@ func parseOrdinalParameter(p *parser) stateFunc { return parseNormal } -func parseNamedParameter(p *parser) stateFunc { - var paramName string - var ok bool - for { - var ch rune - ch, ok = p.next() - if ok && (ch >= '0' && ch <= '9' || 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z') { - paramName = paramName + string(ch) - } else { - break - } - } - if ok { - p.unread() - } - p.namedParams[paramName] = true - p.w.WriteString("@") - p.w.WriteString(paramName) - if !ok { - return nil - } - return parseNormal -} - func parseQuote(p *parser) stateFunc { for { ch, ok := p.next() diff --git a/vendor/github.com/denisenkom/go-mssqldb/rpc.go b/vendor/github.com/denisenkom/go-mssqldb/rpc.go index 4ca22578fae19..00b9b1e217b68 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/rpc.go +++ b/vendor/github.com/denisenkom/go-mssqldb/rpc.go @@ -4,7 +4,7 @@ import ( "encoding/binary" ) -type procId struct { +type ProcId struct { id uint16 name string } @@ -15,13 +15,24 @@ const ( fDefaultValue = 2 ) -type param struct { +type Param struct { Name string Flags uint8 ti typeInfo buffer []byte } +func MakeProcId(name string) (res ProcId) { + res.name = name + if len(name) == 0 { + panic("Proc name shouln't be empty") + } + if len(name) >= 0xffff { + panic("Invalid length of procedure name, should be less than 0xffff") + } + return res +} + const ( fWithRecomp = 1 fNoMetaData = 2 @@ -29,25 +40,25 @@ const ( ) var ( - sp_Cursor = procId{1, ""} - sp_CursorOpen = procId{2, ""} - sp_CursorPrepare = procId{3, ""} - sp_CursorExecute = procId{4, ""} - sp_CursorPrepExec = procId{5, ""} - sp_CursorUnprepare = procId{6, ""} - sp_CursorFetch = procId{7, ""} - sp_CursorOption = procId{8, ""} - sp_CursorClose = procId{9, ""} - sp_ExecuteSql = procId{10, ""} - sp_Prepare = procId{11, ""} - sp_PrepExec = procId{13, ""} - sp_PrepExecRpc = procId{14, ""} - sp_Unprepare = procId{15, ""} + Sp_Cursor = ProcId{1, ""} + Sp_CursorOpen = ProcId{2, ""} + Sp_CursorPrepare = ProcId{3, ""} + Sp_CursorExecute = ProcId{4, ""} + Sp_CursorPrepExec = ProcId{5, ""} + Sp_CursorUnprepare = ProcId{6, ""} + Sp_CursorFetch = ProcId{7, ""} + Sp_CursorOption = ProcId{8, ""} + Sp_CursorClose = ProcId{9, ""} + Sp_ExecuteSql = ProcId{10, ""} + Sp_Prepare = ProcId{11, ""} + Sp_PrepExec = ProcId{13, ""} + Sp_PrepExecRpc = ProcId{14, ""} + Sp_Unprepare = ProcId{15, ""} ) // http://msdn.microsoft.com/en-us/library/dd357576.aspx -func sendRpc(buf *tdsBuffer, headers []headerStruct, proc procId, flags uint16, params []param, resetSession bool) (err error) { - buf.BeginPacket(packRPCRequest, resetSession) +func sendRpc(buf *tdsBuffer, headers []headerStruct, proc ProcId, flags uint16, params []Param) (err error) { + buf.BeginPacket(packRPCRequest) writeAllHeaders(buf, headers) if len(proc.name) == 0 { var idswitch uint16 = 0xffff diff --git a/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go 
b/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go index 9b5bc6893f061..a6e95051c9fde 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go +++ b/vendor/github.com/denisenkom/go-mssqldb/sspi_windows.go @@ -113,7 +113,7 @@ type SSPIAuth struct { ctxt SecHandle } -func getAuth(user, password, service, workstation string) (auth, bool) { +func getAuth(user, password, service, workstation string) (Auth, bool) { if user == "" { return &SSPIAuth{Service: service}, true } diff --git a/vendor/github.com/denisenkom/go-mssqldb/tds.go b/vendor/github.com/denisenkom/go-mssqldb/tds.go index 16d9ca826453b..fd42dba34a26a 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/tds.go +++ b/vendor/github.com/denisenkom/go-mssqldb/tds.go @@ -1,7 +1,6 @@ package mssql import ( - "context" "crypto/tls" "crypto/x509" "encoding/binary" @@ -10,13 +9,11 @@ import ( "io" "io/ioutil" "net" - "net/url" "os" "sort" "strconv" "strings" "time" - "unicode" "unicode/utf16" "unicode/utf8" ) @@ -50,16 +47,13 @@ func parseInstances(msg []byte) map[string]map[string]string { return results } -func getInstances(ctx context.Context, d Dialer, address string) (map[string]map[string]string, error) { - maxTime := 5 * time.Second - ctx, cancel := context.WithTimeout(ctx, maxTime) - defer cancel() - conn, err := d.DialContext(ctx, "udp", address+":1434") +func getInstances(address string) (map[string]map[string]string, error) { + conn, err := net.DialTimeout("udp", address+":1434", 5*time.Second) if err != nil { return nil, err } defer conn.Close() - conn.SetDeadline(time.Now().Add(maxTime)) + conn.SetDeadline(time.Now().Add(5 * time.Second)) _, err = conn.Write([]byte{3}) if err != nil { return nil, err @@ -85,16 +79,11 @@ const ( ) // packet types -// https://msdn.microsoft.com/en-us/library/dd304214.aspx const ( - packSQLBatch packetType = 1 - packRPCRequest = 3 - packReply = 4 - - // 2.2.1.7 Attention: https://msdn.microsoft.com/en-us/library/dd341449.aspx - // 4.19.2 Out-of-Band Attention Signal: https://msdn.microsoft.com/en-us/library/dd305167.aspx - packAttention = 6 - + packSQLBatch = 1 + packRPCRequest = 3 + packReply = 4 + packCancel = 6 packBulkLoadBCP = 7 packTransMgrReq = 14 packNormal = 15 @@ -130,7 +119,7 @@ type tdsSession struct { columns []columnStruct tranid uint64 logFlags uint64 - log optionalLogger + log *Logger routedServer string routedPort uint16 } @@ -142,7 +131,6 @@ const ( logSQL = 8 logParams = 16 logTransaction = 32 - logDebug = 64 ) type columnStruct struct { @@ -152,19 +140,19 @@ type columnStruct struct { ti typeInfo } -type keySlice []uint8 +type KeySlice []uint8 -func (p keySlice) Len() int { return len(p) } -func (p keySlice) Less(i, j int) bool { return p[i] < p[j] } -func (p keySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p KeySlice) Len() int { return len(p) } +func (p KeySlice) Less(i, j int) bool { return p[i] < p[j] } +func (p KeySlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } // http://msdn.microsoft.com/en-us/library/dd357559.aspx func writePrelogin(w *tdsBuffer, fields map[uint8][]byte) error { var err error - w.BeginPacket(packPrelogin, false) + w.BeginPacket(packPrelogin) offset := uint16(5*len(fields) + 1) - keys := make(keySlice, 0, len(fields)) + keys := make(KeySlice, 0, len(fields)) for k, _ := range fields { keys = append(keys, k) } @@ -352,7 +340,7 @@ func manglePassword(password string) []byte { // http://msdn.microsoft.com/en-us/library/dd304019.aspx func sendLogin(w *tdsBuffer, login login) error { - 
w.BeginPacket(packLogin7, false) + w.BeginPacket(packLogin7) hostname := str2ucs2(login.HostName) username := str2ucs2(login.UserName) password := manglePassword(login.Password) @@ -502,11 +490,6 @@ func readBVarChar(r io.Reader) (res string, err error) { if err != nil { return "", err } - - // A zero length could be returned, return an empty string - if numchars == 0 { - return "", nil - } return readUcs2(r, int(numchars)) } @@ -605,7 +588,7 @@ func (hdr transDescrHdr) pack() (res []byte) { } func writeAllHeaders(w io.Writer, headers []headerStruct) (err error) { - // Calculating total length. + // calculatint total length var totallen uint32 = 4 for _, hdr := range headers { totallen += 4 + 2 + uint32(len(hdr.data)) @@ -633,8 +616,10 @@ func writeAllHeaders(w io.Writer, headers []headerStruct) (err error) { return nil } -func sendSqlBatch72(buf *tdsBuffer, sqltext string, headers []headerStruct, resetSession bool) (err error) { - buf.BeginPacket(packSQLBatch, resetSession) +func sendSqlBatch72(buf *tdsBuffer, + sqltext string, + headers []headerStruct) (err error) { + buf.BeginPacket(packSQLBatch) if err = writeAllHeaders(buf, headers); err != nil { return @@ -647,13 +632,6 @@ func sendSqlBatch72(buf *tdsBuffer, sqltext string, headers []headerStruct, rese return buf.FinishPacket() } -// 2.2.1.7 Attention: https://msdn.microsoft.com/en-us/library/dd341449.aspx -// 4.19.2 Out-of-Band Attention Signal: https://msdn.microsoft.com/en-us/library/dd305167.aspx -func sendAttention(buf *tdsBuffer) error { - buf.BeginPacket(packAttention, false) - return buf.FinishPacket() -} - type connectParams struct { logFlags uint64 port uint64 @@ -676,7 +654,6 @@ type connectParams struct { typeFlags uint8 failOverPartner string failOverPort uint64 - packetSize uint16 } func splitConnectionString(dsn string) (res map[string]string) { @@ -700,251 +677,19 @@ func splitConnectionString(dsn string) (res map[string]string) { return res } -// Splits a URL in the ODBC format -func splitConnectionStringOdbc(dsn string) (map[string]string, error) { - res := map[string]string{} - - type parserState int - const ( - // Before the start of a key - parserStateBeforeKey parserState = iota - - // Inside a key - parserStateKey - - // Beginning of a value. May be bare or braced - parserStateBeginValue - - // Inside a bare value - parserStateBareValue - - // Inside a braced value - parserStateBracedValue - - // A closing brace inside a braced value. - // May be the end of the value or an escaped closing brace, depending on the next character - parserStateBracedValueClosingBrace - - // After a value. Next character should be a semicolon or whitespace. - parserStateEndValue - ) - - var state = parserStateBeforeKey - - var key string - var value string - - for i, c := range dsn { - switch state { - case parserStateBeforeKey: - switch { - case c == '=': - return res, fmt.Errorf("Unexpected character = at index %d. 
Expected start of key or semi-colon or whitespace.", i) - case !unicode.IsSpace(c) && c != ';': - state = parserStateKey - key += string(c) - } - - case parserStateKey: - switch c { - case '=': - key = normalizeOdbcKey(key) - if len(key) == 0 { - return res, fmt.Errorf("Unexpected end of key at index %d.", i) - } - - state = parserStateBeginValue - - case ';': - // Key without value - key = normalizeOdbcKey(key) - if len(key) == 0 { - return res, fmt.Errorf("Unexpected end of key at index %d.", i) - } - - res[key] = value - key = "" - value = "" - state = parserStateBeforeKey - - default: - key += string(c) - } - - case parserStateBeginValue: - switch { - case c == '{': - state = parserStateBracedValue - case c == ';': - // Empty value - res[key] = value - key = "" - state = parserStateBeforeKey - case unicode.IsSpace(c): - // Ignore whitespace - default: - state = parserStateBareValue - value += string(c) - } - - case parserStateBareValue: - if c == ';' { - res[key] = strings.TrimRightFunc(value, unicode.IsSpace) - key = "" - value = "" - state = parserStateBeforeKey - } else { - value += string(c) - } - - case parserStateBracedValue: - if c == '}' { - state = parserStateBracedValueClosingBrace - } else { - value += string(c) - } - - case parserStateBracedValueClosingBrace: - if c == '}' { - // Escaped closing brace - value += string(c) - state = parserStateBracedValue - continue - } - - // End of braced value - res[key] = value - key = "" - value = "" - - // This character is the first character past the end, - // so it needs to be parsed like the parserStateEndValue state. - state = parserStateEndValue - switch { - case c == ';': - state = parserStateBeforeKey - case unicode.IsSpace(c): - // Ignore whitespace - default: - return res, fmt.Errorf("Unexpected character %c at index %d. Expected semi-colon or whitespace.", c, i) - } - - case parserStateEndValue: - switch { - case c == ';': - state = parserStateBeforeKey - case unicode.IsSpace(c): - // Ignore whitespace - default: - return res, fmt.Errorf("Unexpected character %c at index %d. Expected semi-colon or whitespace.", c, i) - } - } - } - - switch state { - case parserStateBeforeKey: // Okay - case parserStateKey: // Unfinished key. Treat as key without value. 
- key = normalizeOdbcKey(key) - if len(key) == 0 { - return res, fmt.Errorf("Unexpected end of key at index %d.", len(dsn)) - } - res[key] = value - case parserStateBeginValue: // Empty value - res[key] = value - case parserStateBareValue: - res[key] = strings.TrimRightFunc(value, unicode.IsSpace) - case parserStateBracedValue: - return res, fmt.Errorf("Unexpected end of braced value at index %d.", len(dsn)) - case parserStateBracedValueClosingBrace: // End of braced value - res[key] = value - case parserStateEndValue: // Okay - } - - return res, nil -} - -// Normalizes the given string as an ODBC-format key -func normalizeOdbcKey(s string) string { - return strings.ToLower(strings.TrimRightFunc(s, unicode.IsSpace)) -} - -// Splits a URL of the form sqlserver://username:password@host/instance?param1=value¶m2=value -func splitConnectionStringURL(dsn string) (map[string]string, error) { - res := map[string]string{} - - u, err := url.Parse(dsn) - if err != nil { - return res, err - } - - if u.Scheme != "sqlserver" { - return res, fmt.Errorf("scheme %s is not recognized", u.Scheme) - } - - if u.User != nil { - res["user id"] = u.User.Username() - p, exists := u.User.Password() - if exists { - res["password"] = p - } - } - - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - host = u.Host - } - - if len(u.Path) > 0 { - res["server"] = host + "\\" + u.Path[1:] - } else { - res["server"] = host - } - - if len(port) > 0 { - res["port"] = port - } - - query := u.Query() - for k, v := range query { - if len(v) > 1 { - return res, fmt.Errorf("key %s provided more than once", k) - } - res[strings.ToLower(k)] = v[0] - } - - return res, nil -} - func parseConnectParams(dsn string) (connectParams, error) { + params := splitConnectionString(dsn) var p connectParams - - var params map[string]string - if strings.HasPrefix(dsn, "odbc:") { - parameters, err := splitConnectionStringOdbc(dsn[len("odbc:"):]) - if err != nil { - return p, err - } - params = parameters - } else if strings.HasPrefix(dsn, "sqlserver://") { - parameters, err := splitConnectionStringURL(dsn) - if err != nil { - return p, err - } - params = parameters - } else { - params = splitConnectionString(dsn) - } - strlog, ok := params["log"] if ok { var err error - p.logFlags, err = strconv.ParseUint(strlog, 10, 64) + p.logFlags, err = strconv.ParseUint(strlog, 10, 0) if err != nil { return p, fmt.Errorf("Invalid log parameter '%s': %s", strlog, err.Error()) } } server := params["server"] - parts := strings.SplitN(server, `\`, 2) + parts := strings.SplitN(server, "\\", 2) p.host = parts[0] if p.host == "." 
|| strings.ToUpper(p.host) == "(LOCAL)" || p.host == "" { p.host = "localhost" @@ -960,64 +705,36 @@ func parseConnectParams(dsn string) (connectParams, error) { strport, ok := params["port"] if ok { var err error - p.port, err = strconv.ParseUint(strport, 10, 16) + p.port, err = strconv.ParseUint(strport, 0, 16) if err != nil { f := "Invalid tcp port '%v': %v" return p, fmt.Errorf(f, strport, err.Error()) } } - // https://docs.microsoft.com/en-us/sql/database-engine/configure-windows/configure-the-network-packet-size-server-configuration-option - // Default packet size remains at 4096 bytes - p.packetSize = 4096 - strpsize, ok := params["packet size"] + p.dial_timeout = 5 * time.Second + p.conn_timeout = 30 * time.Second + strconntimeout, ok := params["connection timeout"] if ok { - var err error - psize, err := strconv.ParseUint(strpsize, 0, 16) - if err != nil { - f := "Invalid packet size '%v': %v" - return p, fmt.Errorf(f, strpsize, err.Error()) - } - - // Ensure packet size falls within the TDS protocol range of 512 to 32767 bytes - // NOTE: Encrypted connections have a maximum size of 16383 bytes. If you request - // a higher packet size, the server will respond with an ENVCHANGE request to - // alter the packet size to 16383 bytes. - p.packetSize = uint16(psize) - if p.packetSize < 512 { - p.packetSize = 512 - } else if p.packetSize > 32767 { - p.packetSize = 32767 - } - } - - // https://msdn.microsoft.com/en-us/library/dd341108.aspx - // - // Do not set a connection timeout. Use Context to manage such things. - // Default to zero, but still allow it to be set. - if strconntimeout, ok := params["connection timeout"]; ok { - timeout, err := strconv.ParseUint(strconntimeout, 10, 64) + timeout, err := strconv.ParseUint(strconntimeout, 0, 16) if err != nil { f := "Invalid connection timeout '%v': %v" return p, fmt.Errorf(f, strconntimeout, err.Error()) } p.conn_timeout = time.Duration(timeout) * time.Second } - p.dial_timeout = 15 * time.Second - if strdialtimeout, ok := params["dial timeout"]; ok { - timeout, err := strconv.ParseUint(strdialtimeout, 10, 64) + strdialtimeout, ok := params["dial timeout"] + if ok { + timeout, err := strconv.ParseUint(strdialtimeout, 0, 16) if err != nil { f := "Invalid dial timeout '%v': %v" return p, fmt.Errorf(f, strdialtimeout, err.Error()) } p.dial_timeout = time.Duration(timeout) * time.Second } - - // default keep alive should be 30 seconds according to spec: - // https://msdn.microsoft.com/en-us/library/dd341108.aspx - p.keepAlive = 30 * time.Second - if keepAlive, ok := params["keepalive"]; ok { - timeout, err := strconv.ParseUint(keepAlive, 10, 64) + keepAlive, ok := params["keepalive"] + if ok { + timeout, err := strconv.ParseUint(keepAlive, 0, 16) if err != nil { f := "Invalid keepAlive value '%s': %s" return p, fmt.Errorf(f, keepAlive, err.Error()) @@ -1026,7 +743,7 @@ func parseConnectParams(dsn string) (connectParams, error) { } encrypt, ok := params["encrypt"] if ok { - if strings.EqualFold(encrypt, "DISABLE") { + if strings.ToUpper(encrypt) == "DISABLE" { p.disableEncryption = true } else { var err error @@ -1102,7 +819,7 @@ func parseConnectParams(dsn string) (connectParams, error) { return p, nil } -type auth interface { +type Auth interface { InitialBytes() ([]byte, error) NextBytes([]byte) ([]byte, error) Free() @@ -1111,7 +828,7 @@ type auth interface { // SQL Server AlwaysOn Availability Group Listeners are bound by DNS to a // list of IP addresses. 
So if there is more than one, try them all and // use the first one that allows a connection. -func dialConnection(ctx context.Context, c *Connector, p connectParams) (conn net.Conn, err error) { +func dialConnection(p connectParams) (conn net.Conn, err error) { var ips []net.IP ips, err = net.LookupIP(p.host) if err != nil { @@ -1122,9 +839,9 @@ func dialConnection(ctx context.Context, c *Connector, p connectParams) (conn ne ips = []net.IP{ip} } if len(ips) == 1 { - d := c.getDialer(&p) + d := createDialer(p) addr := net.JoinHostPort(ips[0].String(), strconv.Itoa(int(p.port))) - conn, err = d.DialContext(ctx, "tcp", addr) + conn, err = d.Dial("tcp", addr) } else { //Try Dials in parallel to avoid waiting for timeouts. @@ -1133,9 +850,9 @@ func dialConnection(ctx context.Context, c *Connector, p connectParams) (conn ne portStr := strconv.Itoa(int(p.port)) for _, ip := range ips { go func(ip net.IP) { - d := c.getDialer(&p) + d := createDialer(p) addr := net.JoinHostPort(ip.String(), portStr) - conn, err := d.DialContext(ctx, "tcp", addr) + conn, err := d.Dial("tcp", addr) if err == nil { connChan <- conn } else { @@ -1170,21 +887,16 @@ func dialConnection(ctx context.Context, c *Connector, p connectParams) (conn ne f := "Unable to open tcp connection with host '%v:%v': %v" return nil, fmt.Errorf(f, p.host, p.port, err.Error()) } + return conn, err } -func connect(ctx context.Context, c *Connector, log optionalLogger, p connectParams) (res *tdsSession, err error) { - dialCtx := ctx - if p.dial_timeout > 0 { - var cancel func() - dialCtx, cancel = context.WithTimeout(ctx, p.dial_timeout) - defer cancel() - } +func connect(p connectParams) (res *tdsSession, err error) { + res = nil // if instance is specified use instance resolution service if p.instance != "" { p.instance = strings.ToUpper(p.instance) - d := c.getDialer(&p) - instances, err := getInstances(dialCtx, d, p.host) + instances, err := getInstances(p.host) if err != nil { f := "Unable to get instances from Sql Server Browser on host %v: %v" return nil, fmt.Errorf(f, p.host, err.Error()) @@ -1202,17 +914,16 @@ func connect(ctx context.Context, c *Connector, log optionalLogger, p connectPar } initiate_connection: - conn, err := dialConnection(dialCtx, c, p) + conn, err := dialConnection(p) if err != nil { return nil, err } - toconn := newTimeoutConn(conn, p.conn_timeout) + toconn := NewTimeoutConn(conn, p.conn_timeout) - outbuf := newTdsBuffer(p.packetSize, toconn) + outbuf := newTdsBuffer(4096, toconn) sess := tdsSession{ buf: outbuf, - log: log, logFlags: p.logFlags, } @@ -1258,7 +969,8 @@ initiate_connection: if p.certificate != "" { pem, err := ioutil.ReadFile(p.certificate) if err != nil { - return nil, fmt.Errorf("Cannot read certificate %q: %v", p.certificate, err) + f := "Cannot read certificate '%s': %s" + return nil, fmt.Errorf(f, p.certificate, err.Error()) } certs := x509.NewCertPool() certs.AppendCertsFromPEM(pem) @@ -1268,20 +980,15 @@ initiate_connection: config.InsecureSkipVerify = true } config.ServerName = p.hostInCertificate - // fix for https://github.com/denisenkom/go-mssqldb/issues/166 - // Go implementation of TLS payload size heuristic algorithm splits single TDS package to multiple TCP segments, - // while SQL Server seems to expect one TCP segment per encrypted TDS package. 
- // Setting DynamicRecordSizingDisabled to true disables that algorithm and uses 16384 bytes per TLS package - config.DynamicRecordSizingDisabled = true outbuf.transport = conn toconn.buf = outbuf tlsConn := tls.Client(toconn, &config) err = tlsConn.Handshake() - toconn.buf = nil outbuf.transport = tlsConn if err != nil { - return nil, fmt.Errorf("TLS Handshake failed: %v", err) + f := "TLS Handshake failed: %s" + return nil, fmt.Errorf(f, err.Error()) } if encrypt == encryptOff { outbuf.afterFirst = func() { @@ -1292,7 +999,7 @@ initiate_connection: login := login{ TDSVersion: verTDS74, - PacketSize: uint32(outbuf.PackageSize()), + PacketSize: uint32(len(outbuf.buf)), Database: p.database, OptionFlags2: fODBC, // to get unlimited TEXTSIZE HostName: p.workstation, @@ -1321,7 +1028,7 @@ initiate_connection: var sspi_msg []byte continue_login: tokchan := make(chan tokenStruct, 5) - go processResponse(context.Background(), &sess, tokchan, nil) + go processResponse(&sess, tokchan) success := false for tok := range tokchan { switch token := tok.(type) { @@ -1335,14 +1042,10 @@ continue_login: sess.loginAck = token case error: return nil, fmt.Errorf("Login error: %s", token.Error()) - case doneStruct: - if token.isError() { - return nil, fmt.Errorf("Login error: %s", token.getError()) - } } } if sspi_msg != nil { - outbuf.BeginPacket(packSSPIMessage, false) + outbuf.BeginPacket(packSSPIMessage) _, err = outbuf.Write(sspi_msg) if err != nil { return nil, err diff --git a/vendor/github.com/denisenkom/go-mssqldb/token.go b/vendor/github.com/denisenkom/go-mssqldb/token.go index 1acac8a5d2bca..f20bd14cc9ed3 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/token.go +++ b/vendor/github.com/denisenkom/go-mssqldb/token.go @@ -1,40 +1,30 @@ package mssql import ( - "context" "encoding/binary" - "errors" - "fmt" "io" - "net" "strconv" "strings" ) -//go:generate stringer -type token - -type token byte - // token ids const ( - tokenReturnStatus token = 121 // 0x79 - tokenColMetadata token = 129 // 0x81 - tokenOrder token = 169 // 0xA9 - tokenError token = 170 // 0xAA - tokenInfo token = 171 // 0xAB - tokenReturnValue token = 0xAC - tokenLoginAck token = 173 // 0xad - tokenRow token = 209 // 0xd1 - tokenNbcRow token = 210 // 0xd2 - tokenEnvChange token = 227 // 0xE3 - tokenSSPI token = 237 // 0xED - tokenDone token = 253 // 0xFD - tokenDoneProc token = 254 - tokenDoneInProc token = 255 + tokenReturnStatus = 121 // 0x79 + tokenColMetadata = 129 // 0x81 + tokenOrder = 169 // 0xA9 + tokenError = 170 // 0xAA + tokenInfo = 171 // 0xAB + tokenLoginAck = 173 // 0xad + tokenRow = 209 // 0xd1 + tokenNbcRow = 210 // 0xd2 + tokenEnvChange = 227 // 0xE3 + tokenSSPI = 237 // 0xED + tokenDone = 253 // 0xFD + tokenDoneProc = 254 + tokenDoneInProc = 255 ) // done flags -// https://msdn.microsoft.com/en-us/library/dd340421.aspx const ( doneFinal = 0 doneMore = 1 @@ -69,13 +59,6 @@ const ( envRouting = 20 ) -// COLMETADATA flags -// https://msdn.microsoft.com/en-us/library/dd357363.aspx -const ( - colFlagNullable = 1 - // TODO implement more flags -) - // interface for all tokens type tokenStruct interface{} @@ -87,19 +70,6 @@ type doneStruct struct { Status uint16 CurCmd uint16 RowCount uint64 - errors []Error -} - -func (d doneStruct) isError() bool { - return d.Status&doneError != 0 || len(d.errors) > 0 -} - -func (d doneStruct) getError() Error { - if len(d.errors) > 0 { - return d.errors[len(d.errors)-1] - } else { - return Error{Message: "Request failed but didn't provide reason"} - } } type 
doneInProcStruct doneStruct @@ -150,23 +120,27 @@ func processEnvChg(sess *tdsSession) { badStreamPanic(err) } case envTypLanguage: - // currently ignored - // new value - if _, err = readBVarChar(r); err != nil { - badStreamPanic(err) - } + //currently ignored // old value - if _, err = readBVarChar(r); err != nil { + _, err = readBVarChar(r) + if err != nil { badStreamPanic(err) } - case envTypCharset: - // currently ignored // new value - if _, err = readBVarChar(r); err != nil { + _, err = readBVarChar(r) + if err != nil { badStreamPanic(err) } + case envTypCharset: + //currently ignored // old value - if _, err = readBVarChar(r); err != nil { + _, err = readBVarChar(r) + if err != nil { + badStreamPanic(err) + } + // new value + _, err = readBVarChar(r) + if err != nil { badStreamPanic(err) } case envTypPacketSize: @@ -182,55 +156,38 @@ func processEnvChg(sess *tdsSession) { if err != nil { badStreamPanicf("Invalid Packet size value returned from server (%s): %s", packetsize, err.Error()) } - sess.buf.ResizeBuffer(packetsizei) + if len(sess.buf.buf) != packetsizei { + newbuf := make([]byte, packetsizei) + copy(newbuf, sess.buf.buf) + sess.buf.buf = newbuf + } case envSortId: // currently ignored - // new value + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envSortFlags: // currently ignored - // new value + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envSqlCollation: // currently ignored - var collationSize uint8 - err = binary.Read(r, binary.LittleEndian, &collationSize) - if err != nil { - badStreamPanic(err) - } - - // SQL Collation data should contain 5 bytes in length - if collationSize != 5 { - badStreamPanicf("Invalid SQL Collation size value returned from server: %d", collationSize) - } - - // 4 bytes, contains: LCID ColFlags Version - var info uint32 - err = binary.Read(r, binary.LittleEndian, &info) - if err != nil { - badStreamPanic(err) - } - - // 1 byte, contains: sortID - var sortID uint8 - err = binary.Read(r, binary.LittleEndian, &sortID) - if err != nil { + // old value + if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } @@ -269,21 +226,21 @@ func processEnvChg(sess *tdsSession) { sess.tranid = 0 case envEnlistDTC: // currently ignored - // new value, should be 0 + // old value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // old value + // new value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } case envDefectTran: // currently ignored - // new value + // old value, should be 0 if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } - // old value, should be 0 + // new value if _, err = readBVarChar(r); err != nil { badStreamPanic(err) } @@ -385,9 +342,11 @@ func processEnvChg(sess *tdsSession) { } } +type returnStatus int32 + // http://msdn.microsoft.com/en-us/library/dd358180.aspx -func parseReturnStatus(r *tdsBuffer) ReturnStatus { - return ReturnStatus(r.int32()) +func parseReturnStatus(r *tdsBuffer) returnStatus { + return returnStatus(r.int32()) } func parseOrder(r *tdsBuffer) (res orderStruct) { @@ -399,7 +358,6 @@ func parseOrder(r *tdsBuffer) (res orderStruct) { return 
res } -// https://msdn.microsoft.com/en-us/library/dd340421.aspx func parseDone(r *tdsBuffer) (res doneStruct) { res.Status = r.uint16() res.CurCmd = r.uint16() @@ -407,7 +365,6 @@ func parseDone(r *tdsBuffer) (res doneStruct) { return res } -// https://msdn.microsoft.com/en-us/library/dd340553.aspx func parseDoneInProc(r *tdsBuffer) (res doneInProcStruct) { res.Status = r.uint16() res.CurCmd = r.uint16() @@ -516,57 +473,26 @@ func parseInfo(r *tdsBuffer) (res Error) { return } -// https://msdn.microsoft.com/en-us/library/dd303881.aspx -func parseReturnValue(r *tdsBuffer) (nv namedValue) { - /* - ParamOrdinal - ParamName - Status - UserType - Flags - TypeInfo - CryptoMetadata - Value - */ - r.uint16() - nv.Name = r.BVarChar() - r.byte() - r.uint32() // UserType (uint16 prior to 7.2) - r.uint16() - ti := readTypeInfo(r) - nv.Value = ti.Reader(&ti, r) - return -} - -func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[string]interface{}) { +func processResponse(sess *tdsSession, ch chan tokenStruct) { defer func() { if err := recover(); err != nil { - if sess.logFlags&logErrors != 0 { - sess.log.Printf("ERROR: Intercepted panic %v", err) - } ch <- err } close(ch) }() - packet_type, err := sess.buf.BeginRead() if err != nil { - if sess.logFlags&logErrors != 0 { - sess.log.Printf("ERROR: BeginRead failed %v", err) - } ch <- err return } if packet_type != packReply { - badStreamPanic(fmt.Errorf("unexpected packet type in reply: got %v, expected %v", packet_type, packReply)) + badStreamPanicf("invalid response packet type, expected REPLY, actual: %d", packet_type) } var columns []columnStruct - errs := make([]Error, 0, 5) + var lastError Error + var failed bool for { - token := token(sess.buf.byte()) - if sess.logFlags&logDebug != 0 { - sess.log.Printf("got token %v", token) - } + token := sess.buf.byte() switch token { case tokenSSPI: ch <- parseSSPIMsg(sess.buf) @@ -588,16 +514,17 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[strin ch <- done case tokenDone, tokenDoneProc: done := parseDone(sess.buf) - done.errors = errs - if sess.logFlags&logDebug != 0 { - sess.log.Printf("got DONE or DONEPROC status=%d", done.Status) + if sess.logFlags&logRows != 0 && done.Status&doneCount != 0 { + sess.log.Printf("(%d row(s) affected)\n", done.RowCount) } - if done.Status&doneSrvError != 0 { - ch <- errors.New("SQL Server had internal error") + if done.Status&doneError != 0 || failed { + ch <- lastError return } - if sess.logFlags&logRows != 0 && done.Status&doneCount != 0 { - sess.log.Printf("(%d row(s) affected)\n", done.RowCount) + if done.Status&doneSrvError != 0 { + lastError.Message = "Server Error" + ch <- lastError + return } ch <- done if done.Status&doneMore == 0 { @@ -617,188 +544,18 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct, outs map[strin case tokenEnvChange: processEnvChg(sess) case tokenError: - err := parseError72(sess.buf) - if sess.logFlags&logDebug != 0 { - sess.log.Printf("got ERROR %d %s", err.Number, err.Message) - } - errs = append(errs, err) + lastError = parseError72(sess.buf) + failed = true if sess.logFlags&logErrors != 0 { - sess.log.Println(err.Message) + sess.log.Println(lastError.Message) } case tokenInfo: info := parseInfo(sess.buf) - if sess.logFlags&logDebug != 0 { - sess.log.Printf("got INFO %d %s", info.Number, info.Message) - } if sess.logFlags&logMessages != 0 { sess.log.Println(info.Message) } - case tokenReturnValue: - nv := parseReturnValue(sess.buf) - if len(nv.Name) > 0 { - name := 
nv.Name[1:] // Remove the leading "@". - if ov, has := outs[name]; has { - err = scanIntoOut(name, nv.Value, ov) - if err != nil { - fmt.Println("scan error", err) - ch <- err - } - } - } default: - badStreamPanic(fmt.Errorf("unknown token type returned: %v", token)) - } - } -} - -type parseRespIter byte - -const ( - parseRespIterContinue parseRespIter = iota // Continue parsing current token. - parseRespIterNext // Fetch the next token. - parseRespIterDone // Done with parsing the response. -) - -type parseRespState byte - -const ( - parseRespStateNormal parseRespState = iota // Normal response state. - parseRespStateCancel // Query is canceled, wait for server to confirm. - parseRespStateClosing // Waiting for tokens to come through. -) - -type parseResp struct { - sess *tdsSession - ctxDone <-chan struct{} - state parseRespState - cancelError error -} - -func (ts *parseResp) sendAttention(ch chan tokenStruct) parseRespIter { - if err := sendAttention(ts.sess.buf); err != nil { - ts.dlogf("failed to send attention signal %v", err) - ch <- err - return parseRespIterDone - } - ts.state = parseRespStateCancel - return parseRespIterContinue -} - -func (ts *parseResp) dlog(msg string) { - if ts.sess.logFlags&logDebug != 0 { - ts.sess.log.Println(msg) - } -} -func (ts *parseResp) dlogf(f string, v ...interface{}) { - if ts.sess.logFlags&logDebug != 0 { - ts.sess.log.Printf(f, v...) - } -} - -func (ts *parseResp) iter(ctx context.Context, ch chan tokenStruct, tokChan chan tokenStruct) parseRespIter { - switch ts.state { - default: - panic("unknown state") - case parseRespStateNormal: - select { - case tok, ok := <-tokChan: - if !ok { - ts.dlog("response finished") - return parseRespIterDone - } - if err, ok := tok.(net.Error); ok && err.Timeout() { - ts.cancelError = err - ts.dlog("got timeout error, sending attention signal to server") - return ts.sendAttention(ch) - } - // Pass the token along. - ch <- tok - return parseRespIterContinue - - case <-ts.ctxDone: - ts.ctxDone = nil - ts.dlog("got cancel message, sending attention signal to server") - return ts.sendAttention(ch) - } - case parseRespStateCancel: // Read all responses until a DONE or error is received.Auth - select { - case tok, ok := <-tokChan: - if !ok { - ts.dlog("response finished but waiting for attention ack") - return parseRespIterNext - } - switch tok := tok.(type) { - default: - // Ignore all other tokens while waiting. - // The TDS spec says other tokens may arrive after an attention - // signal is sent. Ignore these tokens and continue looking for - // a DONE with attention confirm mark. - case doneStruct: - if tok.Status&doneAttn != 0 { - ts.dlog("got cancellation confirmation from server") - if ts.cancelError != nil { - ch <- ts.cancelError - ts.cancelError = nil - } else { - ch <- ctx.Err() - } - return parseRespIterDone - } - - // If an error happens during cancel, pass it along and just stop. - // We are uncertain to receive more tokens. - case error: - ch <- tok - ts.state = parseRespStateClosing - } - return parseRespIterContinue - case <-ts.ctxDone: - ts.ctxDone = nil - ts.state = parseRespStateClosing - return parseRespIterContinue - } - case parseRespStateClosing: // Wait for current token chan to close. 
- if _, ok := <-tokChan; !ok { - ts.dlog("response finished") - return parseRespIterDone - } - return parseRespIterContinue - } -} - -func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct, outs map[string]interface{}) { - ts := &parseResp{ - sess: sess, - ctxDone: ctx.Done(), - } - defer func() { - // Ensure any remaining error is piped through - // or the query may look like it executed when it actually failed. - if ts.cancelError != nil { - ch <- ts.cancelError - ts.cancelError = nil - } - close(ch) - }() - - // Loop over multiple responses. - for { - ts.dlog("initiating response reading") - - tokChan := make(chan tokenStruct) - go processSingleResponse(sess, tokChan, outs) - - // Loop over multiple tokens in response. - tokensLoop: - for { - switch ts.iter(ctx, ch, tokChan) { - case parseRespIterContinue: - // Nothing, continue to next token. - case parseRespIterNext: - break tokensLoop - case parseRespIterDone: - return - } + badStreamPanicf("Unknown token type: %d", token) } } } diff --git a/vendor/github.com/denisenkom/go-mssqldb/token_string.go b/vendor/github.com/denisenkom/go-mssqldb/token_string.go deleted file mode 100644 index c075b23be0138..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/token_string.go +++ /dev/null @@ -1,53 +0,0 @@ -// Code generated by "stringer -type token"; DO NOT EDIT - -package mssql - -import "fmt" - -const ( - _token_name_0 = "tokenReturnStatus" - _token_name_1 = "tokenColMetadata" - _token_name_2 = "tokenOrdertokenErrortokenInfo" - _token_name_3 = "tokenLoginAck" - _token_name_4 = "tokenRowtokenNbcRow" - _token_name_5 = "tokenEnvChange" - _token_name_6 = "tokenSSPI" - _token_name_7 = "tokenDonetokenDoneProctokenDoneInProc" -) - -var ( - _token_index_0 = [...]uint8{0, 17} - _token_index_1 = [...]uint8{0, 16} - _token_index_2 = [...]uint8{0, 10, 20, 29} - _token_index_3 = [...]uint8{0, 13} - _token_index_4 = [...]uint8{0, 8, 19} - _token_index_5 = [...]uint8{0, 14} - _token_index_6 = [...]uint8{0, 9} - _token_index_7 = [...]uint8{0, 9, 22, 37} -) - -func (i token) String() string { - switch { - case i == 121: - return _token_name_0 - case i == 129: - return _token_name_1 - case 169 <= i && i <= 171: - i -= 169 - return _token_name_2[_token_index_2[i]:_token_index_2[i+1]] - case i == 173: - return _token_name_3 - case 209 <= i && i <= 210: - i -= 209 - return _token_name_4[_token_index_4[i]:_token_index_4[i+1]] - case i == 227: - return _token_name_5 - case i == 237: - return _token_name_6 - case 253 <= i && i <= 255: - i -= 253 - return _token_name_7[_token_index_7[i]:_token_index_7[i+1]] - default: - return fmt.Sprintf("token(%d)", i) - } -} diff --git a/vendor/github.com/denisenkom/go-mssqldb/tran.go b/vendor/github.com/denisenkom/go-mssqldb/tran.go index cb6436816f97e..ae38107661139 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/tran.go +++ b/vendor/github.com/denisenkom/go-mssqldb/tran.go @@ -1,7 +1,6 @@ -package mssql - // Transaction Manager requests // http://msdn.microsoft.com/en-us/library/dd339887.aspx +package mssql import ( "encoding/binary" @@ -17,19 +16,9 @@ const ( tmSaveXact = 9 ) -type isoLevel uint8 - -const ( - isolationUseCurrent isoLevel = 0 - isolationReadUncommited = 1 - isolationReadCommited = 2 - isolationRepeatableRead = 3 - isolationSerializable = 4 - isolationSnapshot = 5 -) - -func sendBeginXact(buf *tdsBuffer, headers []headerStruct, isolation isoLevel, name string, resetSession bool) (err error) { - buf.BeginPacket(packTransMgrReq, 
resetSession) +func sendBeginXact(buf *tdsBuffer, headers []headerStruct, isolation uint8, + name string) (err error) { + buf.BeginPacket(packTransMgrReq) writeAllHeaders(buf, headers) var rqtype uint16 = tmBeginXact err = binary.Write(buf, binary.LittleEndian, &rqtype) @@ -51,8 +40,8 @@ const ( fBeginXact = 1 ) -func sendCommitXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string, resetSession bool) error { - buf.BeginPacket(packTransMgrReq, resetSession) +func sendCommitXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string) error { + buf.BeginPacket(packTransMgrReq) writeAllHeaders(buf, headers) var rqtype uint16 = tmCommitXact err := binary.Write(buf, binary.LittleEndian, &rqtype) @@ -80,8 +69,8 @@ func sendCommitXact(buf *tdsBuffer, headers []headerStruct, name string, flags u return buf.FinishPacket() } -func sendRollbackXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string, resetSession bool) error { - buf.BeginPacket(packTransMgrReq, resetSession) +func sendRollbackXact(buf *tdsBuffer, headers []headerStruct, name string, flags uint8, isolation uint8, newname string) error { + buf.BeginPacket(packTransMgrReq) writeAllHeaders(buf, headers) var rqtype uint16 = tmRollbackXact err := binary.Write(buf, binary.LittleEndian, &rqtype) diff --git a/vendor/github.com/denisenkom/go-mssqldb/types.go b/vendor/github.com/denisenkom/go-mssqldb/types.go index 3bad788b92392..c38862e9eb767 100644 --- a/vendor/github.com/denisenkom/go-mssqldb/types.go +++ b/vendor/github.com/denisenkom/go-mssqldb/types.go @@ -6,11 +6,8 @@ import ( "fmt" "io" "math" - "reflect" "strconv" "time" - - "github.com/denisenkom/go-mssqldb/internal/cp" ) // fixed-length data types @@ -69,9 +66,6 @@ const ( typeNText = 0x63 typeVariant = 0x62 ) -const _PLP_NULL = 0xFFFFFFFFFFFFFFFF -const _UNKNOWN_PLP_LEN = 0xFFFFFFFFFFFFFFFE -const _PLP_TERMINATOR = 0x00000000 // TYPE_INFO rule // http://msdn.microsoft.com/en-us/library/dd358284.aspx @@ -81,32 +75,11 @@ type typeInfo struct { Scale uint8 Prec uint8 Buffer []byte - Collation cp.Collation - UdtInfo udtInfo - XmlInfo xmlInfo + Collation collation Reader func(ti *typeInfo, r *tdsBuffer) (res interface{}) Writer func(w io.Writer, ti typeInfo, buf []byte) (err error) } -// Common Language Runtime (CLR) Instances -// http://msdn.microsoft.com/en-us/library/dd357962.aspx -type udtInfo struct { - //MaxByteSize uint32 - DBName string - SchemaName string - TypeName string - AssemblyQualifiedName string -} - -// XML Values -// http://msdn.microsoft.com/en-us/library/dd304764.aspx -type xmlInfo struct { - SchemaPresent uint8 - DBName string - OwningSchema string - XmlSchemaCollection string -} - func readTypeInfo(r *tdsBuffer) (res typeInfo) { res.TypeId = r.byte() switch res.TypeId { @@ -133,7 +106,6 @@ func readTypeInfo(r *tdsBuffer) (res typeInfo) { return } -// https://msdn.microsoft.com/en-us/library/dd358284.aspx func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) { err = binary.Write(w, binary.LittleEndian, ti.TypeId) if err != nil { @@ -142,9 +114,7 @@ func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) { switch ti.TypeId { case typeNull, typeInt1, typeBit, typeInt2, typeInt4, typeDateTim4, typeFlt4, typeMoney, typeDateTime, typeFlt8, typeMoney4, typeInt8: - // those are fixed length - // https://msdn.microsoft.com/en-us/library/dd341171.aspx - ti.Writer = writeFixedType + // those are fixed length 
types default: // all others are VARLENTYPE err = writeVarLen(w, ti) if err != nil { @@ -154,26 +124,19 @@ func writeTypeInfo(w io.Writer, ti *typeInfo) (err error) { return } -func writeFixedType(w io.Writer, ti typeInfo, buf []byte) (err error) { - _, err = w.Write(buf) - return -} - -// https://msdn.microsoft.com/en-us/library/dd358341.aspx func writeVarLen(w io.Writer, ti *typeInfo) (err error) { switch ti.TypeId { case typeDateN: - ti.Writer = writeByteLenType + case typeTimeN, typeDateTime2N, typeDateTimeOffsetN: if err = binary.Write(w, binary.LittleEndian, ti.Scale); err != nil { return } ti.Writer = writeByteLenType - case typeIntN, typeDecimal, typeNumeric, + case typeGuid, typeIntN, typeDecimal, typeNumeric, typeBitN, typeDecimalN, typeNumericN, typeFltN, typeMoneyN, typeDateTimeN, typeChar, typeVarChar, typeBinary, typeVarBinary: - // byle len types if ti.Size > 0xff { panic("Invalid size for BYLELEN_TYPE") @@ -193,14 +156,6 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) { } } ti.Writer = writeByteLenType - case typeGuid: - if !(ti.Size == 0x10 || ti.Size == 0x00) { - panic("Invalid size for BYLELEN_TYPE") - } - if err = binary.Write(w, binary.LittleEndian, uint8(ti.Size)); err != nil { - return - } - ti.Writer = writeByteLenType case typeBigVarBin, typeBigVarChar, typeBigBinary, typeBigChar, typeNVarChar, typeNChar, typeXml, typeUdt: // short len types @@ -221,19 +176,14 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) { return } case typeXml: - if err = binary.Write(w, binary.LittleEndian, ti.XmlInfo.SchemaPresent); err != nil { + var schemapresent uint8 = 0 + if err = binary.Write(w, binary.LittleEndian, schemapresent); err != nil { return } } case typeText, typeImage, typeNText, typeVariant: // LONGLEN_TYPE - if err = binary.Write(w, binary.LittleEndian, uint32(ti.Size)); err != nil { - return - } - if err = writeCollation(w, ti.Collation); err != nil { - return - } - ti.Writer = writeLongLenType + panic("LONGLEN_TYPE not implemented") default: panic("Invalid type") } @@ -248,48 +198,6 @@ func decodeDateTim4(buf []byte) time.Time { 0, int(mins), 0, 0, time.UTC) } -func encodeDateTim4(val time.Time) (buf []byte) { - buf = make([]byte, 4) - - ref := time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC) - dur := val.Sub(ref) - days := dur / (24 * time.Hour) - mins := val.Hour()*60 + val.Minute() - if days < 0 { - days = 0 - mins = 0 - } - - binary.LittleEndian.PutUint16(buf[:2], uint16(days)) - binary.LittleEndian.PutUint16(buf[2:], uint16(mins)) - return -} - -// encodes datetime value -// type identifier is typeDateTimeN -func encodeDateTime(t time.Time) (res []byte) { - // base date in days since Jan 1st 1900 - basedays := gregorianDays(1900, 1) - // days since Jan 1st 1900 (same TZ as t) - days := gregorianDays(t.Year(), t.YearDay()) - basedays - tm := 300*(t.Second()+t.Minute()*60+t.Hour()*60*60) + t.Nanosecond()*300/1e9 - // minimum and maximum possible - mindays := gregorianDays(1753, 1) - basedays - maxdays := gregorianDays(9999, 365) - basedays - if days < mindays { - days = mindays - tm = 0 - } - if days > maxdays { - days = maxdays - tm = (23*60*60+59*60+59)*300 + 299 - } - res = make([]byte, 8) - binary.LittleEndian.PutUint32(res[0:4], uint32(days)) - binary.LittleEndian.PutUint32(res[4:8], uint32(tm)) - return -} - func decodeDateTime(buf []byte) time.Time { days := int32(binary.LittleEndian.Uint32(buf)) tm := binary.LittleEndian.Uint32(buf[4:]) @@ -299,7 +207,7 @@ func decodeDateTime(buf []byte) time.Time { 0, 0, secs, ns, time.UTC) } -func 
readFixedType(ti *typeInfo, r *tdsBuffer) interface{} { +func readFixedType(ti *typeInfo, r *tdsBuffer) (res interface{}) { r.ReadFull(ti.Buffer) buf := ti.Buffer switch ti.TypeId { @@ -333,7 +241,12 @@ func readFixedType(ti *typeInfo, r *tdsBuffer) interface{} { panic("shoulnd't get here") } -func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} { +func writeFixedType(w io.Writer, ti typeInfo, buf []byte) (err error) { + _, err = w.Write(buf) + return +} + +func readByteLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { size := r.byte() if size == 0 { return nil @@ -365,7 +278,7 @@ func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} { case 8: return int64(binary.LittleEndian.Uint64(buf)) default: - badStreamPanicf("Invalid size for INTNTYPE: %d", len(buf)) + badStreamPanicf("Invalid size for INTNTYPE") } case typeDecimal, typeNumeric, typeDecimalN, typeNumericN: return decodeDecimal(ti.Prec, ti.Scale, buf) @@ -392,10 +305,6 @@ func readByteLenType(ti *typeInfo, r *tdsBuffer) interface{} { default: badStreamPanicf("Invalid size for MONEYNTYPE") } - case typeDateTim4: - return decodeDateTim4(buf) - case typeDateTime: - return decodeDateTime(buf) case typeDateTimeN: switch len(buf) { case 4: @@ -424,7 +333,7 @@ func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { if ti.Size > 0xff { panic("Invalid size for BYTELEN_TYPE") } - err = binary.Write(w, binary.LittleEndian, uint8(len(buf))) + err = binary.Write(w, binary.LittleEndian, uint8(ti.Size)) if err != nil { return } @@ -432,7 +341,7 @@ func writeByteLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { return } -func readShortLenType(ti *typeInfo, r *tdsBuffer) interface{} { +func readShortLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { size := r.uint16() if size == 0xffff { return nil @@ -475,7 +384,7 @@ func writeShortLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { return } -func readLongLenType(ti *typeInfo, r *tdsBuffer) interface{} { +func readLongLenType(ti *typeInfo, r *tdsBuffer) (res interface{}) { // information about this format can be found here: // http://msdn.microsoft.com/en-us/library/dd304783.aspx // and here: @@ -506,51 +415,10 @@ func readLongLenType(ti *typeInfo, r *tdsBuffer) interface{} { } panic("shoulnd't get here") } -func writeLongLenType(w io.Writer, ti typeInfo, buf []byte) (err error) { - //textptr - err = binary.Write(w, binary.LittleEndian, byte(0x10)) - if err != nil { - return - } - err = binary.Write(w, binary.LittleEndian, uint64(0xFFFFFFFFFFFFFFFF)) - if err != nil { - return - } - err = binary.Write(w, binary.LittleEndian, uint64(0xFFFFFFFFFFFFFFFF)) - if err != nil { - return - } - //timestamp? 
- err = binary.Write(w, binary.LittleEndian, uint64(0xFFFFFFFFFFFFFFFF)) - if err != nil { - return - } - - err = binary.Write(w, binary.LittleEndian, uint32(ti.Size)) - if err != nil { - return - } - _, err = w.Write(buf) - return -} - -func readCollation(r *tdsBuffer) (res cp.Collation) { - res.LcidAndFlags = r.uint32() - res.SortId = r.byte() - return -} - -func writeCollation(w io.Writer, col cp.Collation) (err error) { - if err = binary.Write(w, binary.LittleEndian, col.LcidAndFlags); err != nil { - return - } - err = binary.Write(w, binary.LittleEndian, col.SortId) - return -} // reads variant value // http://msdn.microsoft.com/en-us/library/dd303302.aspx -func readVariantType(ti *typeInfo, r *tdsBuffer) interface{} { +func readVariantType(ti *typeInfo, r *tdsBuffer) (res interface{}) { size := r.int32() if size == 0 { return nil @@ -642,14 +510,14 @@ func readVariantType(ti *typeInfo, r *tdsBuffer) interface{} { // partially length prefixed stream // http://msdn.microsoft.com/en-us/library/dd340469.aspx -func readPLPType(ti *typeInfo, r *tdsBuffer) interface{} { +func readPLPType(ti *typeInfo, r *tdsBuffer) (res interface{}) { size := r.uint64() var buf *bytes.Buffer switch size { - case _PLP_NULL: + case 0xffffffffffffffff: // null return nil - case _UNKNOWN_PLP_LEN: + case 0xfffffffffffffffe: // size unknown buf = bytes.NewBuffer(make([]byte, 0, 1000)) default: @@ -680,16 +548,15 @@ func readPLPType(ti *typeInfo, r *tdsBuffer) interface{} { } func writePLPType(w io.Writer, ti typeInfo, buf []byte) (err error) { - if err = binary.Write(w, binary.LittleEndian, uint64(_UNKNOWN_PLP_LEN)); err != nil { + if err = binary.Write(w, binary.LittleEndian, uint64(len(buf))); err != nil { return } for { chunksize := uint32(len(buf)) - if chunksize == 0 { - err = binary.Write(w, binary.LittleEndian, uint32(_PLP_TERMINATOR)) + if err = binary.Write(w, binary.LittleEndian, chunksize); err != nil { return } - if err = binary.Write(w, binary.LittleEndian, chunksize); err != nil { + if chunksize == 0 { return } if _, err = w.Write(buf[:chunksize]); err != nil { @@ -739,27 +606,19 @@ func readVarLen(ti *typeInfo, r *tdsBuffer) { } ti.Reader = readByteLenType case typeXml: - ti.XmlInfo.SchemaPresent = r.byte() - if ti.XmlInfo.SchemaPresent != 0 { + schemapresent := r.byte() + if schemapresent != 0 { + // just ignore this for now // dbname - ti.XmlInfo.DBName = r.BVarChar() + r.BVarChar() // owning schema - ti.XmlInfo.OwningSchema = r.BVarChar() + r.BVarChar() // xml schema collection - ti.XmlInfo.XmlSchemaCollection = r.UsVarChar() + r.UsVarChar() } ti.Reader = readPLPType - case typeUdt: - ti.Size = int(r.uint16()) - ti.UdtInfo.DBName = r.BVarChar() - ti.UdtInfo.SchemaName = r.BVarChar() - ti.UdtInfo.TypeName = r.BVarChar() - ti.UdtInfo.AssemblyQualifiedName = r.UsVarChar() - - ti.Buffer = make([]byte, ti.Size) - ti.Reader = readPLPType case typeBigVarBin, typeBigVarChar, typeBigBinary, typeBigChar, - typeNVarChar, typeNChar: + typeNVarChar, typeNChar, typeUdt: // short len types ti.Size = int(r.uint16()) switch ti.TypeId { @@ -842,23 +701,13 @@ func decodeDecimal(prec uint8, scale uint8, buf []byte) []byte { // http://msdn.microsoft.com/en-us/library/ee780895.aspx func decodeDateInt(buf []byte) (days int) { - days = int(buf[0]) + int(buf[1])*256 + int(buf[2])*256*256 - return + return int(buf[0]) + int(buf[1])*256 + int(buf[2])*256*256 } func decodeDate(buf []byte) time.Time { return time.Date(1, 1, 1+decodeDateInt(buf), 0, 0, 0, 0, time.UTC) } -func encodeDate(val time.Time) (buf []byte) { - days, 
_, _ := dateTime2(val) - buf = make([]byte, 3) - buf[0] = byte(days) - buf[1] = byte(days >> 8) - buf[2] = byte(days >> 16) - return -} - func decodeTimeInt(scale uint8, buf []byte) (sec int, ns int) { var acc uint64 = 0 for i := len(buf) - 1; i >= 0; i-- { @@ -874,41 +723,11 @@ func decodeTimeInt(scale uint8, buf []byte) (sec int, ns int) { return } -// calculate size of time field in bytes -func calcTimeSize(scale int) int { - if scale <= 2 { - return 3 - } else if scale <= 4 { - return 4 - } else { - return 5 - } -} - -// writes time value into a field buffer -// buffer should be at least calcTimeSize long -func encodeTimeInt(seconds, ns, scale int, buf []byte) { - ns_total := int64(seconds)*1000*1000*1000 + int64(ns) - t := ns_total / int64(math.Pow10(int(scale)*-1)*1e9) - buf[0] = byte(t) - buf[1] = byte(t >> 8) - buf[2] = byte(t >> 16) - buf[3] = byte(t >> 24) - buf[4] = byte(t >> 32) -} - func decodeTime(scale uint8, buf []byte) time.Time { sec, ns := decodeTimeInt(scale, buf) return time.Date(1, 1, 1, 0, 0, sec, ns, time.UTC) } -func encodeTime(hour, minute, second, ns, scale int) (buf []byte) { - seconds := hour*3600 + minute*60 + second - buf = make([]byte, calcTimeSize(scale)) - encodeTimeInt(seconds, ns, scale, buf) - return -} - func decodeDateTime2(scale uint8, buf []byte) time.Time { timesize := len(buf) - 3 sec, ns := decodeTimeInt(scale, buf[:timesize]) @@ -916,17 +735,6 @@ func decodeDateTime2(scale uint8, buf []byte) time.Time { return time.Date(1, 1, 1+days, 0, 0, sec, ns, time.UTC) } -func encodeDateTime2(val time.Time, scale int) (buf []byte) { - days, seconds, ns := dateTime2(val) - timesize := calcTimeSize(scale) - buf = make([]byte, 3+timesize) - encodeTimeInt(seconds, ns, scale, buf) - buf[timesize] = byte(days) - buf[timesize+1] = byte(days >> 8) - buf[timesize+2] = byte(days >> 16) - return -} - func decodeDateTimeOffset(scale uint8, buf []byte) time.Time { timesize := len(buf) - 3 - 2 sec, ns := decodeTimeInt(scale, buf[:timesize]) @@ -938,48 +746,29 @@ func decodeDateTimeOffset(scale uint8, buf []byte) time.Time { time.FixedZone("", offset*60)) } -func encodeDateTimeOffset(val time.Time, scale int) (buf []byte) { - timesize := calcTimeSize(scale) - buf = make([]byte, timesize+2+3) - days, seconds, ns := dateTime2(val.In(time.UTC)) - encodeTimeInt(seconds, ns, scale, buf) - buf[timesize] = byte(days) - buf[timesize+1] = byte(days >> 8) - buf[timesize+2] = byte(days >> 16) - _, offset := val.Zone() - offset /= 60 - buf[timesize+3] = byte(offset) - buf[timesize+4] = byte(offset >> 8) - return -} - -// returns days since Jan 1st 0001 in Gregorian calendar -func gregorianDays(year, yearday int) int { - year0 := year - 1 - return year0*365 + year0/4 - year0/100 + year0/400 + yearday - 1 +func divFloor(x int64, y int64) int64 { + q := x / y + r := x % y + if r != 0 && ((r < 0) != (y < 0)) { + q-- + } + return q } -func dateTime2(t time.Time) (days int, seconds int, ns int) { - // days since Jan 1 1 (in same TZ as t) - days = gregorianDays(t.Year(), t.YearDay()) - seconds = t.Second() + t.Minute()*60 + t.Hour()*60*60 - ns = t.Nanosecond() - if days < 0 { - days = 0 - seconds = 0 - ns = 0 - } - max := gregorianDays(9999, 365) - if days > max { - days = max - seconds = 59 + 59*60 + 23*60*60 - ns = 999999900 - } +func dateTime2(t time.Time) (days int32, ns int64) { + // number of days since Jan 1 1970 UTC + days64 := divFloor(t.Unix(), 24*60*60) + // number of days since Jan 1 1 UTC + days = int32(days64) + 1969*365 + 1969/4 - 1969/100 + 1969/400 + // number of seconds 
within day + secs := t.Unix() - days64*24*60*60 + // number of nanoseconds within day + ns = secs*1e9 + int64(t.Nanosecond()) return } -func decodeChar(col cp.Collation, buf []byte) string { - return cp.CharsetToUTF8(col, buf) +func decodeChar(col collation, buf []byte) string { + return charset2utf8(col, buf) } func decodeUcs2(buf []byte) string { @@ -998,129 +787,12 @@ func decodeXml(ti typeInfo, buf []byte) string { return decodeUcs2(buf) } -func decodeUdt(ti typeInfo, buf []byte) []byte { - return buf -} - -// makes go/sql type instance as described below -// It should return -// the value type that can be used to scan types into. For example, the database -// column type "bigint" this should return "reflect.TypeOf(int64(0))". -func makeGoLangScanType(ti typeInfo) reflect.Type { - switch ti.TypeId { - case typeInt1: - return reflect.TypeOf(int64(0)) - case typeInt2: - return reflect.TypeOf(int64(0)) - case typeInt4: - return reflect.TypeOf(int64(0)) - case typeInt8: - return reflect.TypeOf(int64(0)) - case typeFlt4: - return reflect.TypeOf(float64(0)) - case typeIntN: - switch ti.Size { - case 1: - return reflect.TypeOf(int64(0)) - case 2: - return reflect.TypeOf(int64(0)) - case 4: - return reflect.TypeOf(int64(0)) - case 8: - return reflect.TypeOf(int64(0)) - default: - panic("invalid size of INTNTYPE") - } - case typeFlt8: - return reflect.TypeOf(float64(0)) - case typeFltN: - switch ti.Size { - case 4: - return reflect.TypeOf(float64(0)) - case 8: - return reflect.TypeOf(float64(0)) - default: - panic("invalid size of FLNNTYPE") - } - case typeBigVarBin: - return reflect.TypeOf([]byte{}) - case typeVarChar: - return reflect.TypeOf("") - case typeNVarChar: - return reflect.TypeOf("") - case typeBit, typeBitN: - return reflect.TypeOf(true) - case typeDecimalN, typeNumericN: - return reflect.TypeOf([]byte{}) - case typeMoney, typeMoney4, typeMoneyN: - switch ti.Size { - case 4: - return reflect.TypeOf([]byte{}) - case 8: - return reflect.TypeOf([]byte{}) - default: - panic("invalid size of MONEYN") - } - case typeDateTim4: - return reflect.TypeOf(time.Time{}) - case typeDateTime: - return reflect.TypeOf(time.Time{}) - case typeDateTimeN: - switch ti.Size { - case 4: - return reflect.TypeOf(time.Time{}) - case 8: - return reflect.TypeOf(time.Time{}) - default: - panic("invalid size of DATETIMEN") - } - case typeDateTime2N: - return reflect.TypeOf(time.Time{}) - case typeDateN: - return reflect.TypeOf(time.Time{}) - case typeTimeN: - return reflect.TypeOf(time.Time{}) - case typeDateTimeOffsetN: - return reflect.TypeOf(time.Time{}) - case typeBigVarChar: - return reflect.TypeOf("") - case typeBigChar: - return reflect.TypeOf("") - case typeNChar: - return reflect.TypeOf("") - case typeGuid: - return reflect.TypeOf([]byte{}) - case typeXml: - return reflect.TypeOf("") - case typeText: - return reflect.TypeOf("") - case typeNText: - return reflect.TypeOf("") - case typeImage: - return reflect.TypeOf([]byte{}) - case typeBigBinary: - return reflect.TypeOf([]byte{}) - case typeVariant: - return reflect.TypeOf(nil) - default: - panic(fmt.Sprintf("not implemented makeGoLangScanType for type %d", ti.TypeId)) - } +func decodeUdt(ti typeInfo, buf []byte) int { + panic("Not implemented") } func makeDecl(ti typeInfo) string { switch ti.TypeId { - case typeNull: - // maybe we should use something else here - // this is tested in TestNull - return "nvarchar(1)" - case typeInt1: - return "tinyint" - case typeBigBinary: - return fmt.Sprintf("binary(%d)", ti.Size) - case typeInt2: - return "smallint" - 
case typeInt4: - return "int" case typeInt8: return "bigint" case typeFlt4: @@ -1149,418 +821,25 @@ func makeDecl(ti typeInfo) string { default: panic("invalid size of FLNNTYPE") } - case typeDecimal, typeDecimalN: - return fmt.Sprintf("decimal(%d, %d)", ti.Prec, ti.Scale) - case typeNumeric, typeNumericN: - return fmt.Sprintf("numeric(%d, %d)", ti.Prec, ti.Scale) - case typeMoney4: - return "smallmoney" - case typeMoney: - return "money" - case typeMoneyN: - switch ti.Size { - case 4: - return "smallmoney" - case 8: - return "money" - default: - panic("invalid size of MONEYNTYPE") - } case typeBigVarBin: if ti.Size > 8000 || ti.Size == 0 { - return "varbinary(max)" + return fmt.Sprintf("varbinary(max)") } else { return fmt.Sprintf("varbinary(%d)", ti.Size) } - case typeNChar: - return fmt.Sprintf("nchar(%d)", ti.Size/2) - case typeBigChar, typeChar: - return fmt.Sprintf("char(%d)", ti.Size) - case typeBigVarChar, typeVarChar: - if ti.Size > 4000 || ti.Size == 0 { - return fmt.Sprintf("varchar(max)") - } else { - return fmt.Sprintf("varchar(%d)", ti.Size) - } case typeNVarChar: if ti.Size > 8000 || ti.Size == 0 { - return "nvarchar(max)" + return fmt.Sprintf("nvarchar(max)") } else { return fmt.Sprintf("nvarchar(%d)", ti.Size/2) } case typeBit, typeBitN: return "bit" - case typeDateN: - return "date" - case typeDateTim4: - return "smalldatetime" - case typeDateTime: - return "datetime" case typeDateTimeN: - switch ti.Size { - case 4: - return "smalldatetime" - case 8: - return "datetime" - default: - panic("invalid size of DATETIMNTYPE") - } - case typeTimeN: - return "time" - case typeDateTime2N: - return fmt.Sprintf("datetime2(%d)", ti.Scale) + return "datetime" case typeDateTimeOffsetN: return fmt.Sprintf("datetimeoffset(%d)", ti.Scale) - case typeText: - return "text" - case typeNText: - return "ntext" - case typeUdt: - return ti.UdtInfo.TypeName - case typeGuid: - return "uniqueidentifier" - default: - panic(fmt.Sprintf("not implemented makeDecl for type %#x", ti.TypeId)) - } -} - -// makes go/sql type name as described below -// RowsColumnTypeDatabaseTypeName may be implemented by Rows. It should return the -// database system type name without the length. Type names should be uppercase. -// Examples of returned types: "VARCHAR", "NVARCHAR", "VARCHAR2", "CHAR", "TEXT", -// "DECIMAL", "SMALLINT", "INT", "BIGINT", "BOOL", "[]BIGINT", "JSONB", "XML", -// "TIMESTAMP". 
-func makeGoLangTypeName(ti typeInfo) string { - switch ti.TypeId { - case typeInt1: - return "TINYINT" - case typeInt2: - return "SMALLINT" - case typeInt4: - return "INT" - case typeInt8: - return "BIGINT" - case typeFlt4: - return "REAL" - case typeIntN: - switch ti.Size { - case 1: - return "TINYINT" - case 2: - return "SMALLINT" - case 4: - return "INT" - case 8: - return "BIGINT" - default: - panic("invalid size of INTNTYPE") - } - case typeFlt8: - return "FLOAT" - case typeFltN: - switch ti.Size { - case 4: - return "REAL" - case 8: - return "FLOAT" - default: - panic("invalid size of FLNNTYPE") - } - case typeBigVarBin: - return "VARBINARY" - case typeVarChar: - return "VARCHAR" - case typeNVarChar: - return "NVARCHAR" - case typeBit, typeBitN: - return "BIT" - case typeDecimalN, typeNumericN: - return "DECIMAL" - case typeMoney, typeMoney4, typeMoneyN: - switch ti.Size { - case 4: - return "SMALLMONEY" - case 8: - return "MONEY" - default: - panic("invalid size of MONEYN") - } - case typeDateTim4: - return "SMALLDATETIME" - case typeDateTime: - return "DATETIME" - case typeDateTimeN: - switch ti.Size { - case 4: - return "SMALLDATETIME" - case 8: - return "DATETIME" - default: - panic("invalid size of DATETIMEN") - } - case typeDateTime2N: - return "DATETIME2" - case typeDateN: - return "DATE" - case typeTimeN: - return "TIME" - case typeDateTimeOffsetN: - return "DATETIMEOFFSET" - case typeBigVarChar: - return "VARCHAR" - case typeBigChar: - return "CHAR" - case typeNChar: - return "NCHAR" - case typeGuid: - return "UNIQUEIDENTIFIER" - case typeXml: - return "XML" - case typeText: - return "TEXT" - case typeNText: - return "NTEXT" - case typeImage: - return "IMAGE" - case typeVariant: - return "SQL_VARIANT" - case typeBigBinary: - return "BINARY" - default: - panic(fmt.Sprintf("not implemented makeGoLangTypeName for type %d", ti.TypeId)) - } -} - -// makes go/sql type length as described below -// It should return the length -// of the column type if the column is a variable length type. If the column is -// not a variable length type ok should return false. -// If length is not limited other than system limits, it should return math.MaxInt64. 
-// The following are examples of returned values for various types: -// TEXT (math.MaxInt64, true) -// varchar(10) (10, true) -// nvarchar(10) (10, true) -// decimal (0, false) -// int (0, false) -// bytea(30) (30, true) -func makeGoLangTypeLength(ti typeInfo) (int64, bool) { - switch ti.TypeId { - case typeInt1: - return 0, false - case typeInt2: - return 0, false - case typeInt4: - return 0, false - case typeInt8: - return 0, false - case typeFlt4: - return 0, false - case typeIntN: - switch ti.Size { - case 1: - return 0, false - case 2: - return 0, false - case 4: - return 0, false - case 8: - return 0, false - default: - panic("invalid size of INTNTYPE") - } - case typeFlt8: - return 0, false - case typeFltN: - switch ti.Size { - case 4: - return 0, false - case 8: - return 0, false - default: - panic("invalid size of FLNNTYPE") - } - case typeBit, typeBitN: - return 0, false - case typeDecimalN, typeNumericN: - return 0, false - case typeMoney, typeMoney4, typeMoneyN: - switch ti.Size { - case 4: - return 0, false - case 8: - return 0, false - default: - panic("invalid size of MONEYN") - } - case typeDateTim4, typeDateTime: - return 0, false - case typeDateTimeN: - switch ti.Size { - case 4: - return 0, false - case 8: - return 0, false - default: - panic("invalid size of DATETIMEN") - } - case typeDateTime2N: - return 0, false - case typeDateN: - return 0, false - case typeTimeN: - return 0, false - case typeDateTimeOffsetN: - return 0, false - case typeBigVarBin: - if ti.Size == 0xffff { - return 2147483645, true - } else { - return int64(ti.Size), true - } - case typeVarChar: - return int64(ti.Size), true - case typeBigVarChar: - if ti.Size == 0xffff { - return 2147483645, true - } else { - return int64(ti.Size), true - } - case typeBigChar: - return int64(ti.Size), true - case typeNVarChar: - if ti.Size == 0xffff { - return 2147483645 / 2, true - } else { - return int64(ti.Size) / 2, true - } - case typeNChar: - return int64(ti.Size) / 2, true - case typeGuid: - return 0, false - case typeXml: - return 1073741822, true - case typeText: - return 2147483647, true - case typeNText: - return 1073741823, true - case typeImage: - return 2147483647, true - case typeVariant: - return 0, false - case typeBigBinary: - return 0, false - default: - panic(fmt.Sprintf("not implemented makeGoLangTypeLength for type %d", ti.TypeId)) - } -} - -// makes go/sql type precision and scale as described below -// It should return the length -// of the column type if the column is a variable length type. If the column is -// not a variable length type ok should return false. -// If length is not limited other than system limits, it should return math.MaxInt64. 
-// The following are examples of returned values for various types: -// TEXT (math.MaxInt64, true) -// varchar(10) (10, true) -// nvarchar(10) (10, true) -// decimal (0, false) -// int (0, false) -// bytea(30) (30, true) -func makeGoLangTypePrecisionScale(ti typeInfo) (int64, int64, bool) { - switch ti.TypeId { - case typeInt1: - return 0, 0, false - case typeInt2: - return 0, 0, false - case typeInt4: - return 0, 0, false - case typeInt8: - return 0, 0, false - case typeFlt4: - return 0, 0, false - case typeIntN: - switch ti.Size { - case 1: - return 0, 0, false - case 2: - return 0, 0, false - case 4: - return 0, 0, false - case 8: - return 0, 0, false - default: - panic("invalid size of INTNTYPE") - } - case typeFlt8: - return 0, 0, false - case typeFltN: - switch ti.Size { - case 4: - return 0, 0, false - case 8: - return 0, 0, false - default: - panic("invalid size of FLNNTYPE") - } - case typeBit, typeBitN: - return 0, 0, false - case typeDecimalN, typeNumericN: - return int64(ti.Prec), int64(ti.Scale), true - case typeMoney, typeMoney4, typeMoneyN: - switch ti.Size { - case 4: - return 0, 0, false - case 8: - return 0, 0, false - default: - panic("invalid size of MONEYN") - } - case typeDateTim4, typeDateTime: - return 0, 0, false - case typeDateTimeN: - switch ti.Size { - case 4: - return 0, 0, false - case 8: - return 0, 0, false - default: - panic("invalid size of DATETIMEN") - } - case typeDateTime2N: - return 0, 0, false - case typeDateN: - return 0, 0, false - case typeTimeN: - return 0, 0, false - case typeDateTimeOffsetN: - return 0, 0, false - case typeBigVarBin: - return 0, 0, false - case typeVarChar: - return 0, 0, false - case typeBigVarChar: - return 0, 0, false - case typeBigChar: - return 0, 0, false - case typeNVarChar: - return 0, 0, false - case typeNChar: - return 0, 0, false - case typeGuid: - return 0, 0, false - case typeXml: - return 0, 0, false - case typeText: - return 0, 0, false - case typeNText: - return 0, 0, false - case typeImage: - return 0, 0, false - case typeVariant: - return 0, 0, false - case typeBigBinary: - return 0, 0, false default: - panic(fmt.Sprintf("not implemented makeGoLangTypePrecisionScale for type %d", ti.TypeId)) + panic(fmt.Sprintf("not implemented makeDecl for type %d", ti.TypeId)) } } diff --git a/vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go b/vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go deleted file mode 100644 index c8ef3149b19f0..0000000000000 --- a/vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go +++ /dev/null @@ -1,74 +0,0 @@ -package mssql - -import ( - "database/sql/driver" - "encoding/hex" - "errors" - "fmt" -) - -type UniqueIdentifier [16]byte - -func (u *UniqueIdentifier) Scan(v interface{}) error { - reverse := func(b []byte) { - for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { - b[i], b[j] = b[j], b[i] - } - } - - switch vt := v.(type) { - case []byte: - if len(vt) != 16 { - return errors.New("mssql: invalid UniqueIdentifier length") - } - - var raw UniqueIdentifier - - copy(raw[:], vt) - - reverse(raw[0:4]) - reverse(raw[4:6]) - reverse(raw[6:8]) - *u = raw - - return nil - case string: - if len(vt) != 36 { - return errors.New("mssql: invalid UniqueIdentifier string length") - } - - b := []byte(vt) - for i, c := range b { - switch c { - case '-': - b = append(b[:i], b[i+1:]...) 
- } - } - - _, err := hex.Decode(u[:], []byte(b)) - return err - default: - return fmt.Errorf("mssql: cannot convert %T to UniqueIdentifier", v) - } -} - -func (u UniqueIdentifier) Value() (driver.Value, error) { - reverse := func(b []byte) { - for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 { - b[i], b[j] = b[j], b[i] - } - } - - raw := make([]byte, len(u)) - copy(raw, u[:]) - - reverse(raw[0:4]) - reverse(raw[4:6]) - reverse(raw[6:8]) - - return raw, nil -} - -func (u UniqueIdentifier) String() string { - return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/modules.txt b/vendor/modules.txt index ee9974b3ba9a1..ef253f5194896 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,3 @@ -# cloud.google.com/go v0.34.0 -cloud.google.com/go/civil # github.com/BurntSushi/toml v0.3.1 github.com/BurntSushi/toml # github.com/PuerkitoBio/goquery v0.0.0-20170324135448-ed7d758e9a34 @@ -87,9 +85,8 @@ github.com/couchbase/vellum/utf8 github.com/couchbaselabs/go-couchbase # github.com/davecgh/go-spew v1.1.1 github.com/davecgh/go-spew/spew -# github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 +# github.com/denisenkom/go-mssqldb v0.0.0-20190121005146-b04fd42d9952 => github.com/denisenkom/go-mssqldb v0.0.0-20161128230840-e32ca5036449 github.com/denisenkom/go-mssqldb -github.com/denisenkom/go-mssqldb/internal/cp # github.com/dgrijalva/jwt-go v0.0.0-20161101193935-9ed569b5d1ac github.com/dgrijalva/jwt-go # github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712