update deps

2022-12-16 14:22:07 +02:00
parent 0f33c8a544
commit 2709394519
50 changed files with 1103 additions and 1197 deletions

View File

@@ -8,4 +8,4 @@
package version // import "go.mongodb.org/mongo-driver/version"
// Driver is the current version of the driver.
var Driver = "v1.11.0"
var Driver = "v1.11.1"

View File

@@ -59,8 +59,9 @@ type RetryablePoolError interface {
Retryable() bool
}
-// LabeledError is an error that can have error labels added to it.
-type LabeledError interface {
+// labeledError is an error that can have error labels added to it.
+type labeledError interface {
error
HasErrorLabel(string) bool
}
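For context: labeledError (previously exported as LabeledError) only requires an error value that can report its labels. A minimal, self-contained sketch of a type with that shape follows — exampleLabeledErr and its fields are illustrative only, not driver API.

package main

import "fmt"

// exampleLabeledErr is a hypothetical error type with the same shape as the
// interface above: it is an error and can report whether a label is attached.
type exampleLabeledErr struct {
	msg    string
	labels []string
}

func (e exampleLabeledErr) Error() string { return e.msg }

func (e exampleLabeledErr) HasErrorLabel(label string) bool {
	for _, l := range e.labels {
		if l == label {
			return true
		}
	}
	return false
}

func main() {
	err := exampleLabeledErr{msg: "write failed", labels: []string{"RetryableWriteError"}}
	fmt.Println(err.HasErrorLabel("RetryableWriteError")) // true
	fmt.Println(err.HasErrorLabel("NoWritesPerformed"))   // false
}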
@@ -398,9 +399,19 @@ func (op Operation) Execute(ctx context.Context) error {
// Set the previous indefinite error to be returned in any case where a retryable write error does not have a
// NoWritesPerformed label (the definite case).
switch err := err.(type) {
-case LabeledError:
+case labeledError:
+// If the "prevIndefiniteErr" is nil, then the current error is the first error encountered
+// during the retry attempt cycle. We must persist the first error in the case where all
+// following errors are labeled "NoWritesPerformed", which would otherwise raise nil as the
+// error.
+if prevIndefiniteErr == nil {
+prevIndefiniteErr = err
+}
// If the error is not labeled NoWritesPerformed and is retryable, then set the previous
// indefinite error to be the current error.
if !err.HasErrorLabel(NoWritesPerformed) && err.HasErrorLabel(RetryableWriteError) {
-prevIndefiniteErr = err.(error)
+prevIndefiniteErr = err
}
}
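The hunk above persists the first error of a retry cycle so that a final attempt failing with a NoWritesPerformed-labeled error does not surface as nil. A rough standalone sketch of that bookkeeping, using a stand-in labeled type and label strings rather than the driver's own types:

package main

import "fmt"

// labeled is a stand-in for an error that can carry labels.
type labeled struct {
	msg    string
	labels map[string]bool
}

func (l labeled) Error() string               { return l.msg }
func (l labeled) HasErrorLabel(s string) bool { return l.labels[s] }

func main() {
	// Two simulated retry attempts: a retryable failure, then a failure that is
	// additionally labeled NoWritesPerformed.
	attempts := []labeled{
		{"primary stepped down", map[string]bool{"RetryableWriteError": true}},
		{"no writes performed", map[string]bool{"RetryableWriteError": true, "NoWritesPerformed": true}},
	}

	var prevIndefiniteErr error
	for _, err := range attempts {
		// Persist the first error of the cycle so a trailing NoWritesPerformed
		// error cannot cause nil to be returned to the caller.
		if prevIndefiniteErr == nil {
			prevIndefiniteErr = err
		}
		// A retryable error without the NoWritesPerformed label becomes the new
		// indefinite error.
		if !err.HasErrorLabel("NoWritesPerformed") && err.HasErrorLabel("RetryableWriteError") {
			prevIndefiniteErr = err
		}
	}
	// The first, more informative error wins over the final NoWritesPerformed one.
	fmt.Println(prevIndefiniteErr) // primary stepped down
}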
@@ -595,6 +606,13 @@ func (op Operation) Execute(ctx context.Context) error {
finishedInfo.cmdErr = err
op.publishFinishedEvent(ctx, finishedInfo)
+// prevIndefiniteErrIsSet is "true" if the "err" variable has been set to the "prevIndefiniteErr" in
+// a case in the switch statement below.
+var prevIndefiniteErrIsSet bool
+// TODO(GODRIVER-2579): When refactoring the "Execute" method, consider creating a separate method for the
+// error handling logic below. This will remove the necessity of the "checkError" goto label.
+checkError:
var perr error
switch tt := err.(type) {
case WriteCommandError:
@@ -627,9 +645,13 @@ func (op Operation) Execute(ctx context.Context) error {
}
// If the error is no longer retryable and has the NoWritesPerformed label, then we should
-// return the previous indefinite error.
-if tt.HasErrorLabel(NoWritesPerformed) {
-return prevIndefiniteErr
+// set the error to the "previous indefinite error" unless the current error is already the
+// "previous indefinite error". After resetting, repeat the error check.
+if tt.HasErrorLabel(NoWritesPerformed) && !prevIndefiniteErrIsSet {
+err = prevIndefiniteErr
+prevIndefiniteErrIsSet = true
+goto checkError
}
// If the operation isn't being retried, process the response
@@ -720,9 +742,13 @@ func (op Operation) Execute(ctx context.Context) error {
}
// If the error is no longer retryable and has the NoWritesPerformed label, then we should
-// return the previous indefinite error.
-if tt.HasErrorLabel(NoWritesPerformed) {
-return prevIndefiniteErr
+// set the error to the "previous indefinite error" unless the current error is already the
+// "previous indefinite error". After resetting, repeat the error check.
+if tt.HasErrorLabel(NoWritesPerformed) && !prevIndefiniteErrIsSet {
+err = prevIndefiniteErr
+prevIndefiniteErrIsSet = true
+goto checkError
}
// If the operation isn't being retried, process the response
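The two hunks above rely on the new checkError label: when the final error is labeled NoWritesPerformed, err is swapped for the saved indefinite error exactly once and the classification is re-run. A compact sketch of that control flow, with hypothetical names rather than the driver's:

package main

import (
	"errors"
	"fmt"
)

// classify re-runs its error check after substituting the saved indefinite error,
// guarded by a set-once flag so the goto cannot loop forever.
func classify(err, prevIndefiniteErr error, hasNoWritesPerformed func(error) bool) error {
	var prevIndefiniteErrIsSet bool

checkError:
	if hasNoWritesPerformed(err) && !prevIndefiniteErrIsSet {
		// Swap in the saved error once, then repeat the same handling for it.
		err = prevIndefiniteErr
		prevIndefiniteErrIsSet = true
		goto checkError
	}
	return err
}

func main() {
	noWrites := errors.New("retry failed; no writes performed")
	first := errors.New("primary stepped down")

	got := classify(noWrites, first, func(e error) bool { return e == noWrites })
	fmt.Println(got) // primary stepped down
}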

View File

@@ -520,6 +520,7 @@ func (s *Server) update() {
}
}
+timeoutCnt := 0
for {
// Check if the server is disconnecting. Even if waitForNextCheck has already read from the done channel, we
// can safely read from it again because Disconnect closes the channel.
@@ -545,18 +546,42 @@ func (s *Server) update() {
continue
}
-// Must hold the processErrorLock while updating the server description and clearing the
-// pool. Not holding the lock leads to possible out-of-order processing of pool.clear() and
-// pool.ready() calls from concurrent server description updates.
-s.processErrorLock.Lock()
-s.updateDescription(desc)
-if err := desc.LastError; err != nil {
-// Clear the pool once the description has been updated to Unknown. Pass in a nil service ID to clear
-// because the monitoring routine only runs for non-load balanced deployments in which servers don't return
-// IDs.
-s.pool.clear(err, nil)
+if isShortcut := func() bool {
+// Must hold the processErrorLock while updating the server description and clearing the
+// pool. Not holding the lock leads to possible out-of-order processing of pool.clear() and
+// pool.ready() calls from concurrent server description updates.
+s.processErrorLock.Lock()
+defer s.processErrorLock.Unlock()
+s.updateDescription(desc)
+// Retry after the first timeout before clearing the pool in case of a FAAS pause as
+// described in GODRIVER-2577.
+if err := unwrapConnectionError(desc.LastError); err != nil && timeoutCnt < 1 {
+if err == context.Canceled || err == context.DeadlineExceeded {
+timeoutCnt++
+// We want to immediately retry on timeout error. Continue to next loop.
+return true
+}
+if err, ok := err.(net.Error); ok && err.Timeout() {
+timeoutCnt++
+// We want to immediately retry on timeout error. Continue to next loop.
+return true
+}
+}
+if err := desc.LastError; err != nil {
+// Clear the pool once the description has been updated to Unknown. Pass in a nil service ID to clear
+// because the monitoring routine only runs for non-load balanced deployments in which servers don't return
+// IDs.
+s.pool.clear(err, nil)
+}
+// We're either not handling a timeout error, or we just handled the 2nd consecutive
+// timeout error. In either case, reset the timeout count to 0 and return false to
+// continue the normal check process.
+timeoutCnt = 0
+return false
+}(); isShortcut {
+continue
}
-s.processErrorLock.Unlock()
// If the server supports streaming or we're already streaming, we want to move to streaming the next response
// without waiting. If the server has transitioned to Unknown from a network error, we want to do another
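The rewritten block above wraps the lock-protected work in an immediately invoked closure, so processErrorLock is released via defer on every return path, and the closure returns a flag telling the outer loop to retry immediately after the first consecutive timeout (the FaaS-pause case from GODRIVER-2577). A simplified, self-contained sketch of that pattern — the mutex, error values, and printed messages below are stand-ins, not driver code:

package main

import (
	"errors"
	"fmt"
	"sync"
)

var errTimeout = errors.New("i/o timeout")

func main() {
	var mu sync.Mutex
	timeoutCnt := 0

	// Simulated outcomes of three successive server checks: two timeouts, then success.
	results := []error{errTimeout, errTimeout, nil}

	for i, lastErr := range results {
		if isShortcut := func() bool {
			mu.Lock()
			defer mu.Unlock() // released on every return path

			// Tolerate the first consecutive timeout and retry immediately instead
			// of clearing state (mirrors the FaaS-pause handling above).
			if errors.Is(lastErr, errTimeout) && timeoutCnt < 1 {
				timeoutCnt++
				return true
			}
			if lastErr != nil {
				fmt.Printf("check %d: clearing pool after error: %v\n", i, lastErr)
			}
			// Not a first timeout: reset the counter and continue the normal flow.
			timeoutCnt = 0
			return false
		}(); isShortcut {
			fmt.Printf("check %d: timeout, retrying immediately\n", i)
			continue
		}
	}
}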
@@ -707,19 +732,31 @@ func (s *Server) check() (description.Server, error) {
var err error
var durationNanos int64
-// Create a new connection if this is the first check, the connection was closed after an error during the previous
-// check, or the previous check was cancelled.
+start := time.Now()
if s.conn == nil || s.conn.closed() || s.checkWasCancelled() {
+// Create a new connection if this is the first check, the connection was closed after an error during the previous
+// check, or the previous check was cancelled.
+isNilConn := s.conn == nil
+if !isNilConn {
+s.publishServerHeartbeatStartedEvent(s.conn.ID(), false)
+}
// Create a new connection and add its handshake RTT as a sample.
err = s.setupHeartbeatConnection()
+durationNanos = time.Since(start).Nanoseconds()
if err == nil {
// Use the description from the connection handshake as the value for this check.
s.rttMonitor.addSample(s.conn.helloRTT)
descPtr = &s.conn.desc
+if !isNilConn {
+s.publishServerHeartbeatSucceededEvent(s.conn.ID(), durationNanos, s.conn.desc, false)
+}
+} else {
+err = unwrapConnectionError(err)
+if !isNilConn {
+s.publishServerHeartbeatFailedEvent(s.conn.ID(), durationNanos, err, false)
+}
}
-}
-if descPtr == nil && err == nil {
+} else {
// An existing connection is being used. Use the server description properties to execute the right heartbeat.
// Wrap conn in a type that implements driver.StreamerConnection.
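The reworked branch above times the heartbeat handshake and publishes started/succeeded/failed events only when there was a previous connection to report on (the isNilConn guard). A rough sketch of that flow with stand-in publish calls and a hypothetical conn type, not the driver's API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// conn is a hypothetical stand-in for the monitor's heartbeat connection.
type conn struct{ id string }

// check mimics the shape of the flow above: publish a "started" event only if a
// previous connection exists, time the setup, then publish "succeeded" or "failed".
func check(existing *conn, setup func() (*conn, error)) (*conn, error) {
	start := time.Now()

	isNilConn := existing == nil
	if !isNilConn {
		fmt.Println("heartbeat started for", existing.id)
	}

	c, err := setup()
	durationNanos := time.Since(start).Nanoseconds()

	if err == nil {
		if !isNilConn {
			fmt.Println("heartbeat succeeded for", existing.id, "in", durationNanos, "ns")
		}
		return c, nil
	}
	if !isNilConn {
		fmt.Println("heartbeat failed for", existing.id, ":", err)
	}
	return nil, err
}

func main() {
	// First check: no previous connection, so no events are published.
	c, _ := check(nil, func() (*conn, error) { return &conn{id: "conn-1"}, nil })

	// A later check after an error: events are published for the previous connection.
	_, _ = check(c, func() (*conn, error) { return nil, errors.New("network error") })
}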
@@ -729,7 +766,6 @@ func (s *Server) check() (description.Server, error) {
streamable := previousDescription.TopologyVersion != nil
s.publishServerHeartbeatStartedEvent(s.conn.ID(), s.conn.getCurrentlyStreaming() || streamable)
-start := time.Now()
switch {
case s.conn.getCurrentlyStreaming():
// The connection is already in a streaming state, so we stream the next response.