@@ -25,14 +25,17 @@ const ignoreStreamId = 0
 const (
 	connDisconnected = 0
 	connConnected    = 1
-	connClosed       = 2
+	connShutdown     = 2
+	connClosed       = 3
 )

 const (
 	connTransportNone = ""
 	connTransportSsl  = "ssl"
 )

+const shutdownEventKey = "box.shutdown"
+
 type ConnEventKind int
 type ConnLogKind int

@@ -45,6 +48,8 @@ const (
 	ReconnectFailed
 	// Either reconnect attempts exhausted, or explicit Close is called.
 	Closed
+	// Shutdown signals that the shutdown watcher callback is being processed.
+	Shutdown

 	// LogReconnectFailed is logged when reconnect attempt failed.
 	LogReconnectFailed ConnLogKind = iota + 1
@@ -134,10 +139,19 @@ func (d defaultLogger) Report(event ConnLogKind, conn *Connection, v ...interfac
 // always returns array of array (array of tuples for space related methods).
 // For Eval* and Call* Tarantool always returns array, but does not forces
 // array of arrays.
+//
+// If connected to Tarantool 2.10 or newer and the WatchersFeature is
+// required, the connection supports server graceful shutdown. In this case,
+// the server waits until all client requests are finished and the client
+// disconnects before going down (the server may also go down by timeout).
+// A client reconnect will happen if the connection options enable reconnects.
+//
+// More on graceful shutdown: https://www.tarantool.io/en/doc/latest/dev_guide/internals/iproto/graceful_shutdown/
 type Connection struct {
 	addr  string
 	c     net.Conn
 	mutex sync.Mutex
+	cond  *sync.Cond
 	// Schema contains schema loaded on connection.
 	Schema *Schema
 	// requestId contains the last request ID for requests with nil context.
@@ -162,6 +176,11 @@ type Connection struct {
 	serverProtocolInfo ProtocolInfo
 	// watchMap is a map of key -> chan watchState.
 	watchMap sync.Map
+
+	// shutdownWatcher is the "box.shutdown" event watcher.
+	shutdownWatcher Watcher
+	// requestCnt is a counter of active requests.
+	requestCnt uint32
 }

 var _ = Connector(&Connection{}) // Check compatibility with connector interface.
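
The new doc comment above ties graceful shutdown support to requiring WatchersFeature in the connection options. A minimal usage sketch follows, assuming the v1 `github.com/tarantool/go-tarantool` import path and the existing Opts, ProtocolInfo and Connect APIs; the address and credentials are placeholders:

package main

import (
	"log"
	"time"

	"github.com/tarantool/go-tarantool"
)

func main() {
	opts := tarantool.Opts{
		User:      "guest",
		Reconnect: time.Second, // reconnect after the server goes down
		RequiredProtocolInfo: tarantool.ProtocolInfo{
			// Requiring WatchersFeature enables the shutdown watcher,
			// and with it graceful shutdown handling.
			Features: []tarantool.ProtocolFeature{tarantool.WatchersFeature},
		},
	}
	conn, err := tarantool.Connect("127.0.0.1:3301", opts)
	if err != nil {
		log.Fatalf("connect failed: %v", err)
	}
	defer conn.Close()
}
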
@@ -385,6 +404,8 @@ func Connect(addr string, opts Opts) (conn *Connection, err error) {
 		conn.opts.Logger = defaultLogger{}
 	}

+	conn.cond = sync.NewCond(&conn.mutex)
+
 	if err = conn.createConnection(false); err != nil {
 		ter, ok := err.(Error)
 		if conn.opts.Reconnect <= 0 {
@@ -421,6 +442,16 @@ func Connect(addr string, opts Opts) (conn *Connection, err error) {
 		}
 	}

+	// Subscribe to the shutdown event to process graceful shutdown.
+	if conn.isWatchersRequired() {
+		watcher, werr := conn.NewWatcher(shutdownEventKey, shutdownEventCallback)
+		if werr != nil {
+			conn.closeConnection(werr, true)
+			return nil, werr
+		}
+		conn.shutdownWatcher = watcher
+	}
+
 	return conn, err
 }

@@ -589,6 +620,7 @@ func (conn *Connection) dial() (err error) {
 	conn.lockShards()
 	conn.c = connection
 	atomic.StoreUint32(&conn.state, connConnected)
+	conn.cond.Broadcast()
 	conn.unlockShards()
 	go conn.writer(w, connection)
 	go conn.reader(r, connection)
@@ -762,10 +794,19 @@ func (conn *Connection) closeConnection(neterr error, forever bool) (err error)
 		if conn.state != connClosed {
 			close(conn.control)
 			atomic.StoreUint32(&conn.state, connClosed)
+			conn.cond.Broadcast()
+			// Free the resources.
+			if conn.shutdownWatcher != nil {
+				conn.mutex.Unlock()
+				conn.shutdownWatcher.Unregister()
+				conn.mutex.Lock()
+				conn.shutdownWatcher = nil
+			}
 			conn.notify(Closed)
 		}
 	} else {
 		atomic.StoreUint32(&conn.state, connDisconnected)
+		conn.cond.Broadcast()
 		conn.notify(Disconnected)
 	}
 	if conn.c != nil {
@@ -797,6 +838,7 @@ func (conn *Connection) reconnect(neterr error, c net.Conn) {
 	} else {
 		conn.closeConnection(neterr, true)
 	}
+	conn.cond.Broadcast()
 }

 func (conn *Connection) lockShards() {
@@ -1026,6 +1068,15 @@ func (conn *Connection) newFuture(ctx context.Context) (fut *Future) {
 		fut.done = nil
 		shard.rmut.Unlock()
 		return
+	case connShutdown:
+		fut.err = ClientError{
+			ErrConnectionShutdown,
+			"server shutdown in progress",
+		}
+		fut.ready = nil
+		fut.done = nil
+		shard.rmut.Unlock()
+		return
 	}
 	pos := (fut.requestId / conn.opts.Concurrency) & (requestsMap - 1)
 	if ctx != nil {
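
With the connShutdown branch above, a request issued while the server is shutting down fails immediately with a ClientError carrying ErrConnectionShutdown. A hypothetical caller-side check could look like the sketch below; it assumes the v1 import path, that ClientError exposes its code in a Code field, and uses NewPingRequest purely as an example request:

package main

import (
	"log"

	"github.com/tarantool/go-tarantool"
)

// isShutdownError reports whether a request failed because the server
// is shutting down, so the caller can back off until reconnect.
func isShutdownError(err error) bool {
	cerr, ok := err.(tarantool.ClientError)
	return ok && cerr.Code == tarantool.ErrConnectionShutdown
}

func ping(conn *tarantool.Connection) {
	if _, err := conn.Do(tarantool.NewPingRequest()).Get(); err != nil {
		if isShutdownError(err) {
			log.Println("server shutdown in progress, retry later")
		}
	}
}
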
@@ -1086,6 +1137,7 @@ func (conn *Connection) send(req Request, streamId uint64) *Future {
 	if fut.ready == nil {
 		return fut
 	}
+
 	if req.Ctx() != nil {
 		select {
 		case <-req.Ctx().Done():
@@ -1094,13 +1146,31 @@ func (conn *Connection) send(req Request, streamId uint64) *Future {
 		default:
 		}
 	}
+
+	if conn.shutdownWatcher != nil {
+		atomic.AddUint32(&(conn.requestCnt), uint32(1))
+		go conn.gracefulWait(fut)
+	}
+
 	conn.putFuture(fut, req, streamId)
+
 	if req.Ctx() != nil {
 		go conn.contextWatchdog(fut, req.Ctx())
 	}
+
 	return fut
 }

+func (conn *Connection) gracefulWait(fut *Future) {
+	<-fut.done
+	// This is the advice from the Go documentation on how
+	// to decrement an atomic uint32.
+	// https://pkg.go.dev/sync/atomic#AddUint32
+	if atomic.AddUint32(&(conn.requestCnt), ^uint32(0)) == 0 {
+		conn.cond.Broadcast()
+	}
+}
+
 func (conn *Connection) putFuture(fut *Future, req Request, streamId uint64) {
 	shardn := fut.requestId & (conn.opts.Concurrency - 1)
 	shard := &conn.shard[shardn]
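
gracefulWait relies on the sync/atomic idiom for decrementing an unsigned counter: adding ^uint32(0) wraps around and subtracts one, as noted in the AddUint32 documentation. A standalone sketch of the same pattern:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var requestCnt uint32 = 2 // pretend two requests are in flight

	// Adding ^uint32(0) is equivalent to subtracting 1 (two's complement),
	// which is how gracefulWait decrements conn.requestCnt.
	for atomic.LoadUint32(&requestCnt) != 0 {
		if atomic.AddUint32(&requestCnt, ^uint32(0)) == 0 {
			fmt.Println("last request finished, wake up the waiter")
		}
	}
}
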
@@ -1458,6 +1528,15 @@ func subscribeWatchChannel(conn *Connection, key string) (chan watchState, error
 	return st, nil
 }

+func (conn *Connection) isWatchersRequired() bool {
+	for _, feature := range conn.opts.RequiredProtocolInfo.Features {
+		if feature == WatchersFeature {
+			return true
+		}
+	}
+	return false
+}
+
 // NewWatcher creates a new Watcher object for the connection.
 //
 // You need to require WatchersFeature to use watchers, see examples for the
@@ -1496,15 +1575,7 @@ func (conn *Connection) NewWatcher(key string, callback WatchCallback) (Watcher,
 	// asynchronous. We do not expect any response from a Tarantool instance
 	// That's why we can't just check the Tarantool response for an unsupported
 	// request error.
-	watchersRequired := false
-	for _, feature := range conn.opts.RequiredProtocolInfo.Features {
-		if feature == WatchersFeature {
-			watchersRequired = true
-			break
-		}
-	}
-
-	if !watchersRequired {
+	if !conn.isWatchersRequired() {
 		err := fmt.Errorf("the feature %s must be required by connection " +
 			"options to create a watcher", WatchersFeature)
 		return nil, err
@@ -1563,7 +1634,11 @@ func (conn *Connection) NewWatcher(key string, callback WatchCallback) (Watcher,
1563
1634
1564
1635
if state .cnt == 0 {
1565
1636
// The last one sends IPROTO_UNWATCH.
1566
- conn .Do (newUnwatchRequest (key )).Get ()
1637
+ if ! conn .ClosedNow () {
1638
+ // conn.ClosedNow() check is a workaround for calling
1639
+ // Unregister from connectionClose().
1640
+ conn .Do (newUnwatchRequest (key )).Get ()
1641
+ }
1567
1642
conn .watchMap .Delete (key )
1568
1643
close (state .unready )
1569
1644
}
@@ -1666,3 +1741,52 @@ func (conn *Connection) ServerProtocolInfo() ProtocolInfo {
 func (conn *Connection) ClientProtocolInfo() ProtocolInfo {
 	return clientProtocolInfo.Clone()
 }
+
+func shutdownEventCallback(event WatchEvent) {
+	// Receives "true" on server shutdown.
+	// See https://www.tarantool.io/en/doc/latest/dev_guide/internals/iproto/graceful_shutdown/
+	// step 2.
+	val, ok := event.Value.(bool)
+	if ok && val {
+		go event.Conn.processShutdown()
+	}
+}
+
+func (conn *Connection) processShutdown() {
+	// Forbid state changes.
+	conn.mutex.Lock()
+	defer conn.mutex.Unlock()
+
+	atomic.StoreUint32(&(conn.state), connShutdown)
+	conn.notify(Shutdown)
+
+	c := conn.c
+	for (atomic.LoadUint32(&(conn.state)) == connShutdown) &&
+		(atomic.LoadUint32(&(conn.requestCnt)) != 0) &&
+		(c == conn.c) {
+		// Use a cond var on conn.mutex since request execution may
+		// call reconnect(). It is ok if the state changes as part of
+		// a reconnect since the Tarantool server won't allow reconnecting
+		// in the middle of shutting down.
+		conn.cond.Wait()
+	}
+	// Do not unregister the watcher explicitly here since connection
+	// teardown has the same effect. To clean up connection resources,
+	// it is unregistered on a full close.
+
+	if (atomic.LoadUint32(&(conn.state)) == connShutdown) &&
+		(c == conn.c) {
+		// Start to reconnect based on common rules, the same as in net.box.
+		// Reconnect also closes the connection: the server waits until all
+		// subscribed connections are terminated.
+		// See https://www.tarantool.io/en/doc/latest/dev_guide/internals/iproto/graceful_shutdown/
+		// step 3.
+		conn.mutex.Unlock()
+		conn.reconnect(
+			ClientError{
+				ErrConnectionClosed,
+				"connection closed after server shutdown",
+			}, conn.c)
+		conn.mutex.Lock()
+	}
+}
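
Applications can observe the new Shutdown event alongside the existing connection events. A minimal sketch, assuming the connector's Opts exposes the Notify channel of ConnEvent values (with a Kind field) used for status notifications, and placeholder address and credentials:

package main

import (
	"log"

	"github.com/tarantool/go-tarantool"
)

func main() {
	// Buffered so the connector never blocks on notifications.
	events := make(chan tarantool.ConnEvent, 16)

	opts := tarantool.Opts{
		User:   "guest",
		Notify: events,
		RequiredProtocolInfo: tarantool.ProtocolInfo{
			Features: []tarantool.ProtocolFeature{tarantool.WatchersFeature},
		},
	}
	conn, err := tarantool.Connect("127.0.0.1:3301", opts)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	for ev := range events {
		switch ev.Kind {
		case tarantool.Shutdown:
			log.Println("server shutdown in progress, draining in-flight requests")
		case tarantool.Closed:
			log.Println("connection closed")
			return
		}
	}
}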