Changed remote loader to use a timeout

Rather than blocking outright, the loader now waits 50ms and then tries again, regardless of how many cores are available, so it can never block completely.

Signed-off-by: quobix <dave@quobix.com>
quobix
2023-11-01 14:29:52 -04:00
parent d096163f0e
commit 276c3959fd
3 changed files with 796 additions and 815 deletions


@@ -4,17 +4,16 @@
 package index
 
 import (
+    "context"
     "errors"
     "fmt"
     "github.com/pb33f/libopenapi/datamodel"
     "github.com/pb33f/libopenapi/utils"
-    "log/slog"
-    "runtime"
     "golang.org/x/sync/syncmap"
     "gopkg.in/yaml.v3"
     "io"
     "io/fs"
+    "log/slog"
     "net/http"
     "net/url"
     "os"
@@ -261,21 +260,26 @@ func (i *RemoteFS) Open(remoteURL string) (fs.File, error) {
     // if we're processing, we need to block and wait for the file to be processed
     // try path first
     if _, ok := i.ProcessingFiles.Load(remoteParsedURL.Path); ok {
-        // we can't block if we only have a couple of CPUs, as we'll deadlock / run super slow, only when we're running in parallel
-        // can we block threads.
-        if runtime.GOMAXPROCS(-1) > 2 {
-            i.logger.Debug("waiting for existing fetch to complete", "file", remoteURL, "remoteURL", remoteParsedURL.String())
-            f := make(chan *RemoteFile)
-            fwait := func(path string, c chan *RemoteFile) {
-                for {
-                    if wf, ko := i.Files.Load(remoteParsedURL.Path); ko {
-                        c <- wf.(*RemoteFile)
-                    }
-                }
-            }
-            go fwait(remoteParsedURL.Path, f)
-            return <-f, nil
+        i.logger.Debug("waiting for existing fetch to complete", "file", remoteURL, "remoteURL", remoteParsedURL.String())
+        // Create a context with a timeout of 50ms
+        ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*50)
+        defer cancel()
+        f := make(chan *RemoteFile)
+        fwait := func(path string, c chan *RemoteFile) {
+            for {
+                if wf, ko := i.Files.Load(remoteParsedURL.Path); ko {
+                    c <- wf.(*RemoteFile)
+                }
+            }
+        }
+        go fwait(remoteParsedURL.Path, f)
+        select {
+        case <-ctxTimeout.Done():
+            i.logger.Info("waiting for remote file timed out, trying again", "file", remoteURL, "remoteURL", remoteParsedURL.String())
+        case v := <-f:
+            return v, nil
+        }
         }
     }
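
The hunk above swaps the old GOMAXPROCS guard (which skipped waiting entirely on machines with only a couple of CPUs) for a bounded wait: a goroutine polls the processed-files map and signals a channel, while a 50ms context decides how long the caller is prepared to wait before giving up. Below is a minimal, standalone sketch of that pattern, not the commit's actual code: it assumes a plain sync.Map of strings in place of RemoteFS and *RemoteFile, the names waitForEntry and "/spec.yaml" are made up for illustration, and the buffered channel plus short poll interval are small tidy-ups over the spin loop in the diff.

package main

import (
    "context"
    "fmt"
    "sync"
    "time"
)

// waitForEntry waits up to 50ms for key to appear in files. It returns the
// value and true if the entry showed up in time, or "" and false if the wait
// timed out and the caller should simply try again.
func waitForEntry(files *sync.Map, key string) (string, bool) {
    ctxTimeout, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()

    found := make(chan string, 1) // buffered so the poller never blocks on send
    go func() {
        for {
            if v, ok := files.Load(key); ok {
                found <- v.(string)
                return
            }
            select {
            case <-ctxTimeout.Done():
                return // caller gave up; stop polling so the goroutine doesn't leak
            case <-time.After(time.Millisecond):
                // brief pause between polls (the diff above polls without sleeping)
            }
        }
    }()

    select {
    case <-ctxTimeout.Done():
        return "", false // timed out: report "not ready" so the caller can retry
    case v := <-found:
        return v, true
    }
}

func main() {
    var files sync.Map

    // simulate another goroutine finishing its fetch 20ms from now
    go func() {
        time.Sleep(20 * time.Millisecond)
        files.Store("/spec.yaml", "openapi: 3.1.0")
    }()

    if v, ok := waitForEntry(&files, "/spec.yaml"); ok {
        fmt.Println("got:", v)
    } else {
        fmt.Println("timed out, trying again")
    }
}

In the real Open method, the timeout case just logs and falls out of the select, so execution continues past the processing-files check rather than returning, which is what lets the loader try the fetch again instead of waiting forever.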

File diff suppressed because it is too large


@@ -142,7 +142,7 @@ func TestSpecIndex_DigitalOcean(t *testing.T) {
     cf.AllowRemoteLookup = true
     cf.AvoidCircularReferenceCheck = true
     cf.Logger = slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
-        Level: slog.LevelError,
+        Level: slog.LevelInfo,
     }))
     // setting this baseURL will override the base