mirror of
https://github.com/LukeHagar/libopenapi.git
synced 2025-12-06 12:37:49 +00:00
Removed some dead code that does not need to exist
A consequence of the old index design, now gone.

Signed-off-by: quobix <dave@quobix.com>
@@ -4,393 +4,392 @@
package index

import (
    "errors"
    "fmt"
    "github.com/pb33f/libopenapi/datamodel"
    "github.com/pb33f/libopenapi/utils"
    "log/slog"
    "runtime"

    "golang.org/x/sync/syncmap"
    "gopkg.in/yaml.v3"
    "io"
    "io/fs"
    "net/http"
    "net/url"
    "os"
    "path/filepath"
    "time"
)

type RemoteFS struct {
    indexConfig *SpecIndexConfig
    rootURL string
    rootURLParsed *url.URL
    RemoteHandlerFunc utils.RemoteURLHandler
    Files syncmap.Map
    ProcessingFiles syncmap.Map
    FetchTime int64
    FetchChannel chan *RemoteFile
    remoteErrors []error
    logger *slog.Logger
-   defaultClient *http.Client
    extractedFiles map[string]RolodexFile
}

type RemoteFile struct {
    filename string
    name string
    extension FileExtension
    data []byte
    fullPath string
    URL *url.URL
    lastModified time.Time
    seekingErrors []error
    index *SpecIndex
    parsed *yaml.Node
    offset int64
}

func (f *RemoteFile) GetFileName() string {
    return f.filename
}

func (f *RemoteFile) GetContent() string {
    return string(f.data)
}

func (f *RemoteFile) GetContentAsYAMLNode() (*yaml.Node, error) {
    if f.parsed != nil {
        return f.parsed, nil
    }
    if f.index != nil && f.index.root != nil {
        return f.index.root, nil
    }
    if f.data == nil {
        return nil, fmt.Errorf("no data to parse for file: %s", f.fullPath)
    }
    var root yaml.Node
    err := yaml.Unmarshal(f.data, &root)
    if err != nil {
        return nil, err
    }
    if f.index != nil && f.index.root == nil {
        f.index.root = &root
    }
    f.parsed = &root
    return &root, nil
}

func (f *RemoteFile) GetFileExtension() FileExtension {
    return f.extension
}

func (f *RemoteFile) GetLastModified() time.Time {
    return f.lastModified
}

func (f *RemoteFile) GetErrors() []error {
    return f.seekingErrors
}

func (f *RemoteFile) GetFullPath() string {
    return f.fullPath
}

// fs.FileInfo interfaces

func (f *RemoteFile) Name() string {
    return f.name
}

func (f *RemoteFile) Size() int64 {
    return int64(len(f.data))
}

func (f *RemoteFile) Mode() fs.FileMode {
    return fs.FileMode(0)
}

func (f *RemoteFile) ModTime() time.Time {
    return f.lastModified
}

func (f *RemoteFile) IsDir() bool {
    return false
}

// fs.File interfaces

func (f *RemoteFile) Sys() interface{} {
    return nil
}

func (f *RemoteFile) Close() error {
    return nil
}
func (f *RemoteFile) Stat() (fs.FileInfo, error) {
    return f, nil
}
func (f *RemoteFile) Read(b []byte) (int, error) {
    if f.offset >= int64(len(f.data)) {
        return 0, io.EOF
    }
    if f.offset < 0 {
        return 0, &fs.PathError{Op: "read", Path: f.name, Err: fs.ErrInvalid}
    }
    n := copy(b, f.data[f.offset:])
    f.offset += int64(n)
    return n, nil
}

func (f *RemoteFile) Index(config *SpecIndexConfig) (*SpecIndex, error) {

    if f.index != nil {
        return f.index, nil
    }
    content := f.data

    // first, we must parse the content of the file
    info, err := datamodel.ExtractSpecInfoWithDocumentCheck(content, true)
    if err != nil {
        return nil, err
    }

    index := NewSpecIndexWithConfig(info.RootNode, config)

    index.specAbsolutePath = config.SpecAbsolutePath
    f.index = index
    return index, nil
}
func (f *RemoteFile) GetIndex() *SpecIndex {
    return f.index
}

type FileExtension int

const (
    YAML FileExtension = iota
    JSON
    UNSUPPORTED
)

func NewRemoteFSWithConfig(specIndexConfig *SpecIndexConfig) (*RemoteFS, error) {
    if specIndexConfig == nil {
        return nil, errors.New("no spec index config provided")
    }
    remoteRootURL := specIndexConfig.BaseURL
    log := specIndexConfig.Logger
    if log == nil {
        log = slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
            Level: slog.LevelError,
        }))
    }

    rfs := &RemoteFS{
        indexConfig: specIndexConfig,
        logger: log,
        rootURLParsed: remoteRootURL,
        FetchChannel: make(chan *RemoteFile),
    }
    if remoteRootURL != nil {
        rfs.rootURL = remoteRootURL.String()
    }
    if specIndexConfig.RemoteURLHandler != nil {
        rfs.RemoteHandlerFunc = specIndexConfig.RemoteURLHandler
    } else {
        // default http client
        client := &http.Client{
            Timeout: time.Second * 120,
        }
        rfs.RemoteHandlerFunc = func(url string) (*http.Response, error) {
            return client.Get(url)
        }
    }
    return rfs, nil
}

func NewRemoteFSWithRootURL(rootURL string) (*RemoteFS, error) {
    remoteRootURL, err := url.Parse(rootURL)
    if err != nil {
        return nil, err
    }
    config := CreateOpenAPIIndexConfig()
    config.BaseURL = remoteRootURL
    return NewRemoteFSWithConfig(config)
}

func (i *RemoteFS) SetRemoteHandlerFunc(handlerFunc utils.RemoteURLHandler) {
    i.RemoteHandlerFunc = handlerFunc
}

func (i *RemoteFS) SetIndexConfig(config *SpecIndexConfig) {
    i.indexConfig = config
}

func (i *RemoteFS) GetFiles() map[string]RolodexFile {
    files := make(map[string]RolodexFile)
    i.Files.Range(func(key, value interface{}) bool {
        files[key.(string)] = value.(*RemoteFile)
        return true
    })
    i.extractedFiles = files
    return files
}

func (i *RemoteFS) GetErrors() []error {
    return i.remoteErrors
}

func (i *RemoteFS) Open(remoteURL string) (fs.File, error) {

    if i.indexConfig != nil && !i.indexConfig.AllowRemoteLookup {
        return nil, fmt.Errorf("remote lookup for '%s' is not allowed, please set "+
            "AllowRemoteLookup to true as part of the index configuration", remoteURL)
    }

    remoteParsedURL, err := url.Parse(remoteURL)
    if err != nil {
        return nil, err
    }
    remoteParsedURLOriginal, _ := url.Parse(remoteURL)

    // try path first
    if r, ok := i.Files.Load(remoteParsedURL.Path); ok {
        return r.(*RemoteFile), nil
    }

    // if we're processing, we need to block and wait for the file to be processed
    // try path first
    if _, ok := i.ProcessingFiles.Load(remoteParsedURL.Path); ok {
        // we can't block if we only have a couple of CPUs, as we'll deadlock / run super slow, only when we're running in parallel
        // can we block threads.
        if runtime.GOMAXPROCS(-1) > 2 {
            i.logger.Debug("waiting for existing fetch to complete", "file", remoteURL, "remoteURL", remoteParsedURL.String())

            f := make(chan *RemoteFile)
            fwait := func(path string, c chan *RemoteFile) {
                for {
                    if wf, ko := i.Files.Load(remoteParsedURL.Path); ko {
                        c <- wf.(*RemoteFile)
                    }
                }
            }
            go fwait(remoteParsedURL.Path, f)
            return <-f, nil
        }
    }

    // add to processing
    i.ProcessingFiles.Store(remoteParsedURL.Path, true)

    fileExt := ExtractFileType(remoteParsedURL.Path)

    if fileExt == UNSUPPORTED {
        return nil, &fs.PathError{Op: "open", Path: remoteURL, Err: fs.ErrInvalid}
    }

    // if the remote URL is absolute (http:// or https://), and we have a rootURL defined, we need to override
    // the host being defined by this URL, and use the rootURL instead, but keep the path.
    if i.rootURLParsed != nil {
        remoteParsedURL.Host = i.rootURLParsed.Host
        remoteParsedURL.Scheme = i.rootURLParsed.Scheme
        if !filepath.IsAbs(remoteParsedURL.Path) {
            remoteParsedURL.Path = filepath.Join(i.rootURLParsed.Path, remoteParsedURL.Path)
        }
    }

    i.logger.Debug("loading remote file", "file", remoteURL, "remoteURL", remoteParsedURL.String())

    response, clientErr := i.RemoteHandlerFunc(remoteParsedURL.String())
    if clientErr != nil {

        i.remoteErrors = append(i.remoteErrors, clientErr)
        // remove from processing
        i.ProcessingFiles.Delete(remoteParsedURL.Path)
        if response != nil {
            i.logger.Error("client error", "error", clientErr, "status", response.StatusCode)
        } else {
            i.logger.Error("client error", "error", clientErr.Error())
        }
        return nil, clientErr
    }
    if response == nil {
        return nil, fmt.Errorf("empty response from remote URL: %s", remoteParsedURL.String())
    }
    responseBytes, readError := io.ReadAll(response.Body)
    if readError != nil {

        // remove from processing
        i.ProcessingFiles.Delete(remoteParsedURL.Path)

        return nil, fmt.Errorf("error reading bytes from remote file '%s': [%s]",
            remoteParsedURL.String(), readError.Error())
    }

    if response.StatusCode >= 400 {

        // remove from processing
        i.ProcessingFiles.Delete(remoteParsedURL.Path)

        i.logger.Error("unable to fetch remote document",
            "file", remoteParsedURL.Path, "status", response.StatusCode, "resp", string(responseBytes))
        return nil, fmt.Errorf("unable to fetch remote document: %s", string(responseBytes))
    }

    absolutePath, _ := filepath.Abs(remoteParsedURL.Path)

    // extract last modified from response
    lastModified := response.Header.Get("Last-Modified")

    // parse the last modified date into a time object
    lastModifiedTime, parseErr := time.Parse(time.RFC1123, lastModified)

    if parseErr != nil {
        // can't extract last modified, so use now
        lastModifiedTime = time.Now()
    }

    filename := filepath.Base(remoteParsedURL.Path)

    remoteFile := &RemoteFile{
        filename: filename,
        name: remoteParsedURL.Path,
        extension: fileExt,
        data: responseBytes,
        fullPath: absolutePath,
        URL: remoteParsedURL,
        lastModified: lastModifiedTime,
    }

    copiedCfg := *i.indexConfig

    newBase := fmt.Sprintf("%s://%s%s", remoteParsedURLOriginal.Scheme, remoteParsedURLOriginal.Host,
        filepath.Dir(remoteParsedURL.Path))
    newBaseURL, _ := url.Parse(newBase)

    if newBaseURL != nil {
        copiedCfg.BaseURL = newBaseURL
    }
    copiedCfg.SpecAbsolutePath = remoteParsedURL.String()

    if len(remoteFile.data) > 0 {
        i.logger.Debug("successfully loaded file", "file", absolutePath)
    }
    //i.seekRelatives(remoteFile)
    // remove from processing
    i.ProcessingFiles.Delete(remoteParsedURL.Path)
    i.Files.Store(absolutePath, remoteFile)

    idx, idxError := remoteFile.Index(&copiedCfg)

    if idxError != nil && idx == nil {
        i.remoteErrors = append(i.remoteErrors, idxError)
    } else {

        // for each index, we need a resolver
        resolver := NewResolver(idx)
        idx.resolver = resolver
        idx.BuildIndex()
    }
    return remoteFile, errors.Join(i.remoteErrors...)
}
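For orientation, the remote filesystem defined above is driven the same way the tests below drive it: build it from a root URL (or a SpecIndexConfig), optionally swap in a custom handler, then Open paths relative to that root. The following is a minimal, self-contained sketch of that flow; the root URL and file path are placeholders and are not part of this commit.

package main

import (
    "fmt"
    "io"

    "github.com/pb33f/libopenapi/index"
)

func main() {
    // Placeholder root URL; any server hosting the referenced spec files will do.
    remoteFS, err := index.NewRemoteFSWithRootURL("https://example.com/specs")
    if err != nil {
        panic(err)
    }

    // Open resolves the path against the root URL, fetches it over HTTP,
    // caches the result in Files and builds a SpecIndex for the document.
    file, err := remoteFS.Open("/openapi.yaml")
    if err != nil {
        panic(err)
    }

    data, _ := io.ReadAll(file) // *RemoteFile implements io.Reader
    info, _ := file.Stat()      // ...and fs.FileInfo
    fmt.Println(info.Name(), len(data))
}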
@@ -4,383 +4,382 @@
package index

import (
    "errors"
    "fmt"
    "github.com/stretchr/testify/assert"
    "io"
    "net/http"
    "net/http/httptest"
    "net/url"
    "testing"
    "time"
)

var test_httpClient = &http.Client{Timeout: time.Duration(60) * time.Second}

func test_buildServer() *httptest.Server {
    return httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
        if req.URL.String() == "/file1.yaml" {
            rw.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT")
            _, _ = rw.Write([]byte(`"$ref": "./deeper/file2.yaml#/components/schemas/Pet"`))
            return
        }
        if req.URL.String() == "/deeper/file2.yaml" {
            rw.Header().Set("Last-Modified", "Wed, 21 Oct 2015 08:28:00 GMT")
            _, _ = rw.Write([]byte(`"$ref": "/deeper/even_deeper/file3.yaml#/components/schemas/Pet"`))
            return
        }

        if req.URL.String() == "/deeper/even_deeper/file3.yaml" {
            rw.Header().Set("Last-Modified", "Wed, 21 Oct 2015 10:28:00 GMT")
            _, _ = rw.Write([]byte(`"$ref": "../file2.yaml#/components/schemas/Pet"`))
            return
        }

        rw.Header().Set("Last-Modified", "Wed, 21 Oct 2015 12:28:00 GMT")

        if req.URL.String() == "/deeper/list.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "../file2.yaml"`))
            return
        }

        if req.URL.String() == "/bag/list.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "pocket/list.yaml"\n\n"$ref": "zip/things.yaml"`))
            return
        }

        if req.URL.String() == "/bag/pocket/list.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "../list.yaml"\n\n"$ref": "../../file2.yaml"`))
            return
        }

        if req.URL.String() == "/bag/pocket/things.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "list.yaml"`))
            return
        }

        if req.URL.String() == "/bag/zip/things.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "list.yaml"`))
            return
        }

        if req.URL.String() == "/bag/zip/list.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "../list.yaml"\n\n"$ref": "../../file1.yaml"\n\n"$ref": "more.yaml""`))
            return
        }

        if req.URL.String() == "/bag/zip/more.yaml" {
            _, _ = rw.Write([]byte(`"$ref": "../../deeper/list.yaml"\n\n"$ref": "../../bad.yaml"`))
            return
        }

        if req.URL.String() == "/bad.yaml" {
            rw.WriteHeader(http.StatusInternalServerError)
            _, _ = rw.Write([]byte(`"error, cannot do the thing"`))
            return
        }

        _, _ = rw.Write([]byte(`OK`))
    }))
}

func TestNewRemoteFS_BasicCheck(t *testing.T) {

    server := test_buildServer()
    defer server.Close()

    //remoteFS := NewRemoteFS("https://raw.githubusercontent.com/digitalocean/openapi/main/specification/")
    remoteFS, _ := NewRemoteFSWithRootURL(server.URL)
    remoteFS.RemoteHandlerFunc = test_httpClient.Get

    file, err := remoteFS.Open("/file1.yaml")

    assert.NoError(t, err)

    bytes, rErr := io.ReadAll(file)
    assert.NoError(t, rErr)

    stat, _ := file.Stat()

    assert.Equal(t, "/file1.yaml", stat.Name())
    assert.Equal(t, int64(53), stat.Size())
    assert.Len(t, bytes, 53)

    lastMod := stat.ModTime()
    assert.Equal(t, "2015-10-21 07:28:00 +0000 GMT", lastMod.String())
}

func TestNewRemoteFS_BasicCheck_Relative(t *testing.T) {

    server := test_buildServer()
    defer server.Close()

    remoteFS, _ := NewRemoteFSWithRootURL(server.URL)
    remoteFS.RemoteHandlerFunc = test_httpClient.Get

    file, err := remoteFS.Open("/deeper/file2.yaml")

    assert.NoError(t, err)

    bytes, rErr := io.ReadAll(file)
    assert.NoError(t, rErr)

    assert.Len(t, bytes, 64)

    stat, _ := file.Stat()

    assert.Equal(t, "/deeper/file2.yaml", stat.Name())
    assert.Equal(t, int64(64), stat.Size())

    lastMod := stat.ModTime()
    assert.Equal(t, "2015-10-21 08:28:00 +0000 GMT", lastMod.String())
}

func TestNewRemoteFS_BasicCheck_Relative_Deeper(t *testing.T) {

    server := test_buildServer()
    defer server.Close()

    cf := CreateOpenAPIIndexConfig()
    u, _ := url.Parse(server.URL)
    cf.BaseURL = u

    remoteFS, _ := NewRemoteFSWithConfig(cf)
    remoteFS.RemoteHandlerFunc = test_httpClient.Get

    file, err := remoteFS.Open("/deeper/even_deeper/file3.yaml")

    assert.NoError(t, err)

    bytes, rErr := io.ReadAll(file)
    assert.NoError(t, rErr)

    assert.Len(t, bytes, 47)

    stat, _ := file.Stat()

    assert.Equal(t, "/deeper/even_deeper/file3.yaml", stat.Name())
    assert.Equal(t, int64(47), stat.Size())
    assert.Equal(t, "/deeper/even_deeper/file3.yaml", file.(*RemoteFile).Name())
    assert.Equal(t, "file3.yaml", file.(*RemoteFile).GetFileName())
    assert.Len(t, file.(*RemoteFile).GetContent(), 47)
    assert.Equal(t, YAML, file.(*RemoteFile).GetFileExtension())
    assert.NotNil(t, file.(*RemoteFile).GetLastModified())
    assert.Len(t, file.(*RemoteFile).GetErrors(), 0)
    assert.Equal(t, "/deeper/even_deeper/file3.yaml", file.(*RemoteFile).GetFullPath())
    assert.False(t, file.(*RemoteFile).IsDir())
    assert.Nil(t, file.(*RemoteFile).Sys())
    assert.Nil(t, file.(*RemoteFile).Close())

    lastMod := stat.ModTime()
    assert.Equal(t, "2015-10-21 10:28:00 +0000 GMT", lastMod.String())
}

func TestRemoteFile_NoContent(t *testing.T) {

    rf := &RemoteFile{}
    x, y := rf.GetContentAsYAMLNode()
    assert.Nil(t, x)
    assert.Error(t, y)
}

func TestRemoteFile_BadContent(t *testing.T) {

    rf := &RemoteFile{data: []byte("bad: data: on: a single: line: makes: for: unhappy: yaml"), index: &SpecIndex{}}
    x, y := rf.GetContentAsYAMLNode()
    assert.Nil(t, x)
    assert.Error(t, y)
}

func TestRemoteFile_GoodContent(t *testing.T) {

    rf := &RemoteFile{data: []byte("good: data"), index: &SpecIndex{}}
    x, y := rf.GetContentAsYAMLNode()
    assert.NotNil(t, x)
    assert.NoError(t, y)
    assert.NotNil(t, rf.index.root)

    // bad read
    rf.offset = -1
    d, err := io.ReadAll(rf)
    assert.Empty(t, d)
    assert.Error(t, err)

}

func TestRemoteFile_Index_AlreadySet(t *testing.T) {

    rf := &RemoteFile{data: []byte("good: data"), index: &SpecIndex{}}
    x, y := rf.Index(&SpecIndexConfig{})
    assert.NotNil(t, x)
    assert.NoError(t, y)

}

func TestRemoteFile_Index_BadContent(t *testing.T) {

    rf := &RemoteFile{data: []byte("no: sleep: until: the bugs: weep")}
    x, y := rf.Index(&SpecIndexConfig{})
    assert.Nil(t, x)
    assert.Error(t, y)

}

func TestRemoteFS_NoConfig(t *testing.T) {

    x, y := NewRemoteFSWithConfig(nil)
    assert.Nil(t, x)
    assert.Error(t, y)

}

func TestRemoteFS_SetRemoteHandler(t *testing.T) {

    h := func(url string) (*http.Response, error) {
        return nil, errors.New("nope")
    }
    cf := CreateClosedAPIIndexConfig()
    cf.RemoteURLHandler = h

    x, y := NewRemoteFSWithConfig(cf)
    assert.NotNil(t, x)
    assert.NoError(t, y)
    assert.NotNil(t, x.RemoteHandlerFunc)

-   cf = CreateClosedAPIIndexConfig()
    assert.NotNil(t, x.RemoteHandlerFunc)

    x.SetRemoteHandlerFunc(h)
    assert.NotNil(t, x.RemoteHandlerFunc)

    // run the handler
    i, n := x.RemoteHandlerFunc("http://www.google.com")
    assert.Nil(t, i)
    assert.Error(t, n)
    assert.Equal(t, "nope", n.Error())

}

func TestRemoteFS_NoConfigBadURL(t *testing.T) {
    x, y := NewRemoteFSWithRootURL("I am not a URL. I am a potato.: no.... // no.")
    assert.Nil(t, x)
    assert.Error(t, y)
}

func TestNewRemoteFS_Open_NoConfig(t *testing.T) {

    rfs := &RemoteFS{}
    x, y := rfs.Open("https://pb33f.io")
    assert.Nil(t, x)
    assert.Error(t, y)

}

func TestNewRemoteFS_Open_ConfigNotAllowed(t *testing.T) {

    rfs := &RemoteFS{indexConfig: CreateClosedAPIIndexConfig()}
    x, y := rfs.Open("https://pb33f.io")
    assert.Nil(t, x)
    assert.Error(t, y)

}

func TestNewRemoteFS_Open_BadURL(t *testing.T) {

    rfs := &RemoteFS{indexConfig: CreateOpenAPIIndexConfig()}
    x, y := rfs.Open("I am not a URL. I am a box of candy.. yum yum yum:: in my tum tum tum")
    assert.Nil(t, x)
    assert.Error(t, y)

}

func TestNewRemoteFS_RemoteBaseURL_RelativeRequest(t *testing.T) {

    cf := CreateOpenAPIIndexConfig()
    h := func(url string) (*http.Response, error) {
        return nil, fmt.Errorf("nope, not having it %s", url)
    }
    cf.RemoteURLHandler = h

    cf.BaseURL, _ = url.Parse("https://pb33f.io/the/love/machine")
    rfs, _ := NewRemoteFSWithConfig(cf)

    x, y := rfs.Open("gib/gab/jib/jab.yaml")
    assert.Nil(t, x)
    assert.Error(t, y)
    assert.Equal(t, "nope, not having it https://pb33f.io/the/love/machine/gib/gab/jib/jab.yaml", y.Error())

}

func TestNewRemoteFS_RemoteBaseURL_BadRequestButContainsBody(t *testing.T) {

    cf := CreateOpenAPIIndexConfig()
    h := func(url string) (*http.Response, error) {
        return &http.Response{}, fmt.Errorf("it's bad, but who cares %s", url)
    }
    cf.RemoteURLHandler = h

    cf.BaseURL, _ = url.Parse("https://pb33f.io/the/love/machine")
    rfs, _ := NewRemoteFSWithConfig(cf)

    x, y := rfs.Open("/woof.yaml")
    assert.Nil(t, x)
    assert.Error(t, y)
    assert.Equal(t, "it's bad, but who cares https://pb33f.io/woof.yaml", y.Error())

}

func TestNewRemoteFS_RemoteBaseURL_NoErrorNoResponse(t *testing.T) {

    cf := CreateOpenAPIIndexConfig()
    h := func(url string) (*http.Response, error) {
        return nil, nil // useless!
    }
    cf.RemoteURLHandler = h

    cf.BaseURL, _ = url.Parse("https://pb33f.io/the/love/machine")
    rfs, _ := NewRemoteFSWithConfig(cf)

    x, y := rfs.Open("/woof.yaml")
    assert.Nil(t, x)
    assert.Error(t, y)
    assert.Equal(t, "empty response from remote URL: https://pb33f.io/woof.yaml", y.Error())
}

func TestNewRemoteFS_RemoteBaseURL_ReadBodyFail(t *testing.T) {

    cf := CreateOpenAPIIndexConfig()
    h := func(url string) (*http.Response, error) {
        r := &http.Response{}
        r.Body = &LocalFile{offset: -1} // read will fail.
        return r, nil
    }
    cf.RemoteURLHandler = h

    cf.BaseURL, _ = url.Parse("https://pb33f.io/the/love/machine")
    rfs, _ := NewRemoteFSWithConfig(cf)

    x, y := rfs.Open("/woof.yaml")
    assert.Nil(t, x)
    assert.Error(t, y)
    assert.Equal(t, "error reading bytes from remote file 'https://pb33f.io/woof.yaml': "+
        "[read : invalid argument]", y.Error())
}

func TestNewRemoteFS_RemoteBaseURL_EmptySpecFailIndex(t *testing.T) {

    cf := CreateOpenAPIIndexConfig()
    h := func(url string) (*http.Response, error) {
        r := &http.Response{}
        r.Body = &LocalFile{data: []byte{}} // no bytes to read.
        return r, nil
    }
    cf.RemoteURLHandler = h

    cf.BaseURL, _ = url.Parse("https://pb33f.io/the/love/machine")
    rfs, _ := NewRemoteFSWithConfig(cf)

    x, y := rfs.Open("/woof.yaml")
    assert.NotNil(t, x)
    assert.Error(t, y)
    assert.Equal(t, "there is nothing in the spec, it's empty - so there is nothing to be done", y.Error())
}
@@ -650,29 +650,8 @@ func (index *SpecIndex) GetGlobalCallbacksCount() int {

    // look through method for callbacks
    callbacks, _ := yamlpath.NewPath("$..callbacks")
-   // Channel used to receive the result from doSomething function
-   ch := make(chan string, 1)
-
-   // Create a context with a timeout of 5 seconds
-   ctxTimeout, cancel := context.WithTimeout(context.Background(), time.Millisecond*500)
-   defer cancel()
-
    var res []*yaml.Node
-
-   doSomething := func(ctx context.Context, ch chan<- string) {
-       res, _ = callbacks.Find(m.Node)
-       ch <- m.Definition
-   }
-
-   // Start the doSomething function
-   go doSomething(ctxTimeout, ch)
-
-   select {
-   case <-ctxTimeout.Done():
-       fmt.Printf("Callback %d: Context cancelled: %v\n", m.Node.Line, ctxTimeout.Err())
-   case <-ch:
-   }
-
+   res, _ = callbacks.Find(m.Node)
    if len(res) > 0 {
        for _, callback := range res[0].Content {
            if utils.IsNodeMap(callback) {
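After this cleanup, counting callbacks is just a direct yamlpath query against the parsed document, with no goroutine, channel, or timeout wrapper around it. Below is a small, self-contained sketch of that pattern; it assumes the vmware-labs yaml-jsonpath package behind the yamlpath import above, and the spec fragment is illustrative only, not taken from this commit.

package main

import (
    "fmt"

    "github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath"
    "gopkg.in/yaml.v3"
)

func main() {
    // Illustrative spec fragment containing a single callback.
    spec := []byte(`
post:
  callbacks:
    onEvent:
      '{$request.body#/url}':
        post:
          responses:
            '200':
              description: ok
`)

    var root yaml.Node
    if err := yaml.Unmarshal(spec, &root); err != nil {
        panic(err)
    }

    // Query the document directly; Find returns the matching nodes.
    callbacks, _ := yamlpath.NewPath("$..callbacks")
    res, _ := callbacks.Find(&root)

    if len(res) > 0 {
        // res[0] is the callbacks mapping node; Content alternates keys and values.
        fmt.Println("callbacks found:", len(res[0].Content)/2)
    }
}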