From 1df5f44e6f5769c227aa28c5358aeee1250ec7e4 Mon Sep 17 00:00:00 2001 From: Dave Shanley Date: Thu, 16 Feb 2023 16:23:36 -0500 Subject: [PATCH] Working through fix-73 and v0.6.0 --- datamodel/document_config.go | 19 + datamodel/high/v3/document_test.go | 12 + datamodel/low/reference.go | 8 + datamodel/low/v2/swagger.go | 23 +- datamodel/low/v3/create_document.go | 32 +- document.go | 61 +- document_test.go | 73 ++ index/extract_references.go | 82 ++ index/extract_refs.go | 320 ++++++ index/find_component.go | 278 +++++ index/index_model.go | 218 ++++ index/index_utils.go | 90 ++ index/spec_index.go | 1213 +------------------- index/spec_index_test.go | 54 +- index/utility_methods.go | 372 +++++++ resolver/resolver.go | 4 +- test_specs/digitalocean.yaml | 1587 +++++++++++++++++++++++++++ 17 files changed, 3241 insertions(+), 1205 deletions(-) create mode 100644 datamodel/document_config.go create mode 100644 index/extract_references.go create mode 100644 index/extract_refs.go create mode 100644 index/find_component.go create mode 100644 index/index_model.go create mode 100644 index/index_utils.go create mode 100644 index/utility_methods.go create mode 100644 test_specs/digitalocean.yaml diff --git a/datamodel/document_config.go b/datamodel/document_config.go new file mode 100644 index 0000000..d44ad1b --- /dev/null +++ b/datamodel/document_config.go @@ -0,0 +1,19 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package datamodel + +import "net/url" + +// DocumentConfiguration is used to configure the document creation process. It was added in v0.6.0 to allow +// for more fine-grained control over controls and new features. +type DocumentConfiguration struct { + // if the document uses relative file references, this is the base url to use when resolving them. + BaseURL *url.URL + + // AllowFileReferences will allow the index to locate relative file references. This is disabled by default. 
+ AllowFileReferences bool + + // AllowRemoteReferences will allow the index to lookup remote references. This is disabled by default. + AllowRemoteReferences bool +} diff --git a/datamodel/high/v3/document_test.go b/datamodel/high/v3/document_test.go index 0132b8b..26284b2 100644 --- a/datamodel/high/v3/document_test.go +++ b/datamodel/high/v3/document_test.go @@ -398,6 +398,18 @@ func TestAsanaAsDoc(t *testing.T) { fmt.Println(d) } +func TestDigitalOceanAsDoc(t *testing.T) { + data, _ := ioutil.ReadFile("../../../test_specs/asana.yaml") + info, _ := datamodel.ExtractSpecInfo(data) + var err []error + lowDoc, err = lowv3.CreateDocument(info) + if err != nil { + panic("broken something") + } + d := NewDocument(lowDoc) + fmt.Println(d) +} + func TestPetstoreAsDoc(t *testing.T) { data, _ := ioutil.ReadFile("../../../test_specs/petstorev3.json") info, _ := datamodel.ExtractSpecInfo(data) diff --git a/datamodel/low/reference.go b/datamodel/low/reference.go index 2af6403..099640c 100644 --- a/datamodel/low/reference.go +++ b/datamodel/low/reference.go @@ -188,6 +188,10 @@ func IsCircular(node *yaml.Node, idx *index.SpecIndex) bool { if refs[i].Journey[k].Node == node { return true } + isRef, _, refValue := utils.IsNodeRefValue(node) + if isRef && refs[i].Journey[k].Definition == refValue { + return true + } } } // check mapped references in case we didn't find it. @@ -216,6 +220,10 @@ func GetCircularReferenceResult(node *yaml.Node, idx *index.SpecIndex) *index.Ci if refs[i].Journey[k].Node == node { return refs[i] } + isRef, _, refValue := utils.IsNodeRefValue(node) + if isRef && refs[i].Journey[k].Definition == refValue { + return refs[i] + } } } // check mapped references in case we didn't find it. 
diff --git a/datamodel/low/v2/swagger.go b/datamodel/low/v2/swagger.go index c791d40..561864c 100644 --- a/datamodel/low/v2/swagger.go +++ b/datamodel/low/v2/swagger.go @@ -121,13 +121,32 @@ func (s *Swagger) GetExtensions() map[low.KeyReference[string]]low.ValueReferenc return s.Extensions } -func CreateDocument(info *datamodel.SpecInfo) (*Swagger, []error) { +// CreateDocumentFromConfig will create a new Swagger document from the provided SpecInfo and DocumentConfiguration. +func CreateDocumentFromConfig(info *datamodel.SpecInfo, + configuration *datamodel.DocumentConfiguration) (*Swagger, []error) { + return createDocument(info, configuration) +} +// CreateDocument will create a new Swagger document from the provided SpecInfo. +// +// Deprecated: Use CreateDocumentFromConfig instead. +func CreateDocument(info *datamodel.SpecInfo) (*Swagger, []error) { + return createDocument(info, &datamodel.DocumentConfiguration{ + AllowRemoteReferences: true, + AllowFileReferences: true, + }) +} + +func createDocument(info *datamodel.SpecInfo, config *datamodel.DocumentConfiguration) (*Swagger, []error) { doc := Swagger{Swagger: low.ValueReference[string]{Value: info.Version, ValueNode: info.RootNode}} doc.Extensions = low.ExtractExtensions(info.RootNode.Content[0]) // build an index - idx := index.NewSpecIndex(info.RootNode) + idx := index.NewSpecIndexWithConfig(info.RootNode, &index.SpecIndexConfig{ + BaseURL: config.BaseURL, + AllowRemoteLookup: config.AllowRemoteReferences, + AllowFileLookup: config.AllowFileReferences, + }) doc.Index = idx doc.SpecInfo = info diff --git a/datamodel/low/v3/create_document.go b/datamodel/low/v3/create_document.go index c12e6bb..3476511 100644 --- a/datamodel/low/v3/create_document.go +++ b/datamodel/low/v3/create_document.go @@ -11,8 +11,24 @@ import ( "sync" ) +// CreateDocument will create a new Document instance from the provided SpecInfo. +// +// Deprecated: Use CreateDocumentFromConfig instead. 
This function will be removed in a later version, it +// defaults to allowing file and remote references, and does not support relative file references. func CreateDocument(info *datamodel.SpecInfo) (*Document, []error) { + config := datamodel.DocumentConfiguration{ + AllowFileReferences: true, + AllowRemoteReferences: true, + } + return createDocument(info, &config) +} +// CreateDocumentFromConfig Create a new document from the provided SpecInfo and DocumentConfiguration pointer. +func CreateDocumentFromConfig(info *datamodel.SpecInfo, config *datamodel.DocumentConfiguration) (*Document, []error) { + return createDocument(info, config) +} + +func createDocument(info *datamodel.SpecInfo, config *datamodel.DocumentConfiguration) (*Document, []error) { _, labelNode, versionNode := utils.FindKeyNodeFull(OpenAPILabel, info.RootNode.Content) var version low.NodeReference[string] if versionNode == nil { @@ -22,10 +38,16 @@ func CreateDocument(info *datamodel.SpecInfo) (*Document, []error) { doc := Document{Version: version} // build an index - idx := index.NewSpecIndex(info.RootNode) + idx := index.NewSpecIndexWithConfig(info.RootNode, &index.SpecIndexConfig{ + BaseURL: config.BaseURL, + AllowFileLookup: config.AllowFileReferences, + AllowRemoteLookup: config.AllowRemoteReferences, + }) doc.Index = idx - var errors []error + var errs []error + + errs = idx.GetReferenceIndexErrors() // create resolver and check for circular references. 
resolve := resolver.NewResolver(idx) @@ -33,7 +55,7 @@ func CreateDocument(info *datamodel.SpecInfo) (*Document, []error) { if len(resolvingErrors) > 0 { for r := range resolvingErrors { - errors = append(errors, resolvingErrors[r]) + errs = append(errs, resolvingErrors[r]) } } @@ -71,10 +93,10 @@ func CreateDocument(info *datamodel.SpecInfo) (*Document, []error) { wg.Add(len(extractionFuncs)) for _, f := range extractionFuncs { - go runExtraction(info, &doc, idx, f, &errors, &wg) + go runExtraction(info, &doc, idx, f, &errs, &wg) } wg.Wait() - return &doc, errors + return &doc, errs } func extractInfo(info *datamodel.SpecInfo, doc *Document, idx *index.SpecIndex) error { diff --git a/document.go b/document.go index 24ab95f..44b3b0f 100644 --- a/document.go +++ b/document.go @@ -39,6 +39,10 @@ type Document interface { // GetSpecInfo will return the *datamodel.SpecInfo instance that contains all specification information. GetSpecInfo() *datamodel.SpecInfo + // SetConfiguration will set the configuration for the document. This allows for finer grained control over + // allowing remote or local references, as well as a BaseURL to allow for relative file references. + SetConfiguration(configuration *datamodel.DocumentConfiguration) + // BuildV2Model will build out a Swagger (version 2) model from the specification used to create the document // If there are any issues, then no model will be returned, instead a slice of errors will explain all the // problems that occurred. 
This method will only support version 2 specifications and will throw an error for @@ -63,6 +67,7 @@ type Document interface { type document struct { version string info *datamodel.SpecInfo + config *datamodel.DocumentConfiguration } // DocumentModel represents either a Swagger document (version 2) or an OpenAPI document (version 3) that is @@ -89,6 +94,16 @@ func NewDocument(specByteArray []byte) (Document, error) { return d, nil } +// NewDocumentWithConfiguration is the same as NewDocument, except it's a convenience function that calls NewDocument +// under the hood and then calls SetConfiguration() on the returned Document. +func NewDocumentWithConfiguration(specByteArray []byte, configuration *datamodel.DocumentConfiguration) (Document, error) { + d, err := NewDocument(specByteArray) + if d != nil { + d.SetConfiguration(configuration) + } + return d, err +} + func (d *document) GetVersion() string { return d.version } @@ -97,6 +112,10 @@ func (d *document) GetSpecInfo() *datamodel.SpecInfo { return d.info } +func (d *document) SetConfiguration(configuration *datamodel.DocumentConfiguration) { + d.config = configuration +} + func (d *document) Serialize() ([]byte, error) { if d.info == nil { return nil, fmt.Errorf("unable to serialize, document has not yet been initialized") @@ -120,23 +139,32 @@ func (d *document) BuildV2Model() (*DocumentModel[v2high.Swagger], []error) { "supplied spec is a different version (%v). Try 'BuildV3Model()'", d.info.SpecFormat)) return nil, errors } - lowDoc, errs := v2low.CreateDocument(d.info) - // Do not shortcircuit on circular reference errors, so the client + + var lowDoc *v2low.Swagger + if d.config == nil { + d.config = &datamodel.DocumentConfiguration{ + AllowFileReferences: true, + AllowRemoteReferences: true, + } + } + + lowDoc, errors = v2low.CreateDocumentFromConfig(d.info, d.config) + // Do not short-circuit on circular reference errors, so the client // has the option of ignoring them. 
- for _, err := range errs { + for _, err := range errors { if refErr, ok := err.(*resolver.ResolvingError); ok { if refErr.CircularReference == nil { - return nil, errs + return nil, errors } } else { - return nil, errs + return nil, errors } } highDoc := v2high.NewSwaggerDocument(lowDoc) return &DocumentModel[v2high.Swagger]{ Model: *highDoc, Index: lowDoc.Index, - }, errs + }, errors } func (d *document) BuildV3Model() (*DocumentModel[v3high.Document], []error) { @@ -150,23 +178,32 @@ func (d *document) BuildV3Model() (*DocumentModel[v3high.Document], []error) { "supplied spec is a different version (%v). Try 'BuildV2Model()'", d.info.SpecFormat)) return nil, errors } - lowDoc, errs := v3low.CreateDocument(d.info) - // Do not shortcircuit on circular reference errors, so the client + + var lowDoc *v3low.Document + if d.config == nil { + d.config = &datamodel.DocumentConfiguration{ + AllowFileReferences: true, + AllowRemoteReferences: true, + } + } + + lowDoc, errors = v3low.CreateDocumentFromConfig(d.info, d.config) + // Do not short-circuit on circular reference errors, so the client // has the option of ignoring them. 
- for _, err := range errs { + for _, err := range errors { if refErr, ok := err.(*resolver.ResolvingError); ok { if refErr.CircularReference == nil { - return nil, errs + return nil, errors } } else { - return nil, errs + return nil, errors } } highDoc := v3high.NewDocument(lowDoc) return &DocumentModel[v3high.Document]{ Model: *highDoc, Index: lowDoc.Index, - }, errs + }, errors } // CompareDocuments will accept a left and right Document implementing struct, build a model for the correct diff --git a/document_test.go b/document_test.go index dcf1b65..955a22b 100644 --- a/document_test.go +++ b/document_test.go @@ -5,7 +5,9 @@ package libopenapi import ( "fmt" + "github.com/pb33f/libopenapi/datamodel" "io/ioutil" + "net/url" "strings" "testing" @@ -247,6 +249,77 @@ func ExampleNewDocument_fromOpenAPI3Document() { // Output: There are 13 paths and 8 schemas in the document } +func ExampleNewDocument_fromWithDocumentConfigurationFailure() { + + // This example shows how to create a document that prevents the loading of external references/ + // from files or the network + + // load in the Digital Ocean OpenAPI specification + digitalOcean, _ := ioutil.ReadFile("test_specs/digitalocean.yaml") + + // create a DocumentConfiguration that prevents loading file and remote references + config := datamodel.DocumentConfiguration{ + AllowFileReferences: false, + AllowRemoteReferences: false, + } + + // create a new document from specification bytes + doc, err := NewDocumentWithConfiguration(digitalOcean, &config) + + // if anything went wrong, an error is thrown + if err != nil { + panic(fmt.Sprintf("cannot create new document: %e", err)) + } + + // only errors will be thrown, so just capture them and print the number of errors. 
+ _, errors := doc.BuildV3Model() + + // if anything went wrong when building the v3 model, a slice of errors will be returned + if len(errors) > 0 { + fmt.Println("Error building Digital Ocean spec errors reported") + } + // Output: Error building Digital Ocean spec errors reported +} + +func ExampleNewDocument_fromWithDocumentConfigurationSuccess() { + + // This example shows how to create a document that prevents the loading of external references/ + // from files or the network + + // load in the Digital Ocean OpenAPI specification + digitalOcean, _ := ioutil.ReadFile("test_specs/digitalocean.yaml") + + // Digital Ocean needs a baseURL to be set, so we can resolve relative references. + baseURL, _ := url.Parse("https://raw.githubusercontent.com/digitalocean/openapi/main/specification") + + // create a DocumentConfiguration that allows loading file and remote references, and sets the baseURL + // to somewhere that can resolve the relative references. + config := datamodel.DocumentConfiguration{ + AllowFileReferences: true, + AllowRemoteReferences: true, + BaseURL: baseURL, + } + + // create a new document from specification bytes + doc, err := NewDocumentWithConfiguration(digitalOcean, &config) + + // if anything went wrong, an error is thrown + if err != nil { + panic(fmt.Sprintf("cannot create new document: %e", err)) + } + + // only errors will be thrown, so just capture them and print the number of errors. + _, errors := doc.BuildV3Model() + + // if anything went wrong when building the v3 model, a slice of errors will be returned + if len(errors) > 0 { + fmt.Println("Error building Digital Ocean spec errors reported") + } else { + fmt.Println("Digital Ocean spec built successfully") + } + // Output: Digital Ocean spec built successfully +} + func ExampleNewDocument_fromSwaggerDocument() { // How to read in a Swagger / OpenAPI 2 Specification, into a Document. 
diff --git a/index/extract_references.go b/index/extract_references.go new file mode 100644 index 0000000..62c1cda --- /dev/null +++ b/index/extract_references.go @@ -0,0 +1,82 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package index + +import ( + "fmt" + "github.com/pb33f/libopenapi/utils" + "strings" +) + +// ExtractComponentsFromRefs returns located components from references. The returned nodes from here +// can be used for resolving as they contain the actual object properties. +func (index *SpecIndex) ExtractComponentsFromRefs(refs []*Reference) []*Reference { + var found []*Reference + + //run this async because when things get recursive, it can take a while + c := make(chan bool) + + locate := func(ref *Reference, refIndex int, sequence []*ReferenceMapped) { + located := index.FindComponent(ref.Definition, ref.Node) + if located != nil { + index.refLock.Lock() + if index.allMappedRefs[ref.Definition] == nil { + found = append(found, located) + index.allMappedRefs[ref.Definition] = located + sequence[refIndex] = &ReferenceMapped{ + Reference: located, + Definition: ref.Definition, + } + } + index.refLock.Unlock() + } else { + + _, path := utils.ConvertComponentIdIntoFriendlyPathSearch(ref.Definition) + indexError := &IndexingError{ + Err: fmt.Errorf("component '%s' does not exist in the specification", ref.Definition), + Node: ref.Node, + Path: path, + } + index.refErrors = append(index.refErrors, indexError) + } + c <- true + } + + var refsToCheck []*Reference + for _, ref := range refs { + + // check reference for backslashes (hah yeah seen this too!) + if strings.Contains(ref.Definition, "\\") { // this was from blazemeter.com haha! + _, path := utils.ConvertComponentIdIntoFriendlyPathSearch(ref.Definition) + indexError := &IndexingError{ + Err: fmt.Errorf("component '%s' contains a backslash '\\'. 
It's not valid", ref.Definition), + Node: ref.Node, + Path: path, + } + index.refErrors = append(index.refErrors, indexError) + continue + + } + refsToCheck = append(refsToCheck, ref) + } + mappedRefsInSequence := make([]*ReferenceMapped, len(refsToCheck)) + for r := range refsToCheck { + // expand our index of all mapped refs + go locate(refsToCheck[r], r, mappedRefsInSequence) + } + + completedRefs := 0 + for completedRefs < len(refsToCheck) { + select { + case <-c: + completedRefs++ + } + } + for m := range mappedRefsInSequence { + if mappedRefsInSequence[m] != nil { + index.allMappedRefsSequenced = append(index.allMappedRefsSequenced, mappedRefsInSequence[m]) + } + } + return found +} diff --git a/index/extract_refs.go b/index/extract_refs.go new file mode 100644 index 0000000..43884ee --- /dev/null +++ b/index/extract_refs.go @@ -0,0 +1,320 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package index + +import ( + "errors" + "fmt" + "github.com/pb33f/libopenapi/utils" + "gopkg.in/yaml.v3" + "strings" +) + +// ExtractRefs will return a deduplicated slice of references for every unique ref found in the document. +// The total number of refs, will generally be much higher, you can extract those from GetRawReferenceCount() +func (index *SpecIndex) ExtractRefs(node, parent *yaml.Node, seenPath []string, level int, poly bool, pName string) []*Reference { + if node == nil { + return nil + } + var found []*Reference + if len(node.Content) > 0 { + var prev, polyName string + for i, n := range node.Content { + + if utils.IsNodeMap(n) || utils.IsNodeArray(n) { + level++ + // check if we're using polymorphic values. These tend to create rabbit warrens of circular + // references if every single link is followed. We don't resolve polymorphic values. 
+ isPoly, _ := index.checkPolymorphicNode(prev) + polyName = pName + if isPoly { + poly = true + if prev != "" { + polyName = prev + } + } + found = append(found, index.ExtractRefs(n, node, seenPath, level, poly, polyName)...) + } + + // check if we're dealing with an inline schema definition, that isn't part of an array + // (which means it's being used as a value in an array, and it's not a label) + // https://github.com/pb33f/libopenapi/issues/76 + if i%2 == 0 && n.Value == "schema" && !utils.IsNodeArray(node) && (i+1 < len(node.Content)) { + isRef, _, _ := utils.IsNodeRefValue(node.Content[i+1]) + if isRef { + continue + } + ref := &Reference{ + Node: node.Content[i+1], + Path: fmt.Sprintf("$.%s", strings.Join(seenPath, ".")), + } + index.allInlineSchemaDefinitions = append(index.allInlineSchemaDefinitions, ref) + + // check if the schema is an object or an array, + // and if so, add it to the list of inline schema object definitions. + k, v := utils.FindKeyNodeTop("type", node.Content[i+1].Content) + if k != nil && v != nil { + if v.Value == "object" || v.Value == "array" { + index.allInlineSchemaObjectDefinitions = append(index.allInlineSchemaObjectDefinitions, ref) + } + } + } + + if i%2 == 0 && n.Value == "$ref" { + + // only look at scalar values, not maps (looking at you k8s) + if !utils.IsNodeStringValue(node.Content[i+1]) { + continue + } + + index.linesWithRefs[n.Line] = true + + fp := make([]string, len(seenPath)) + for x, foundPathNode := range seenPath { + fp[x] = foundPathNode + } + + value := node.Content[i+1].Value + + segs := strings.Split(value, "/") + name := segs[len(segs)-1] + _, p := utils.ConvertComponentIdIntoFriendlyPathSearch(value) + ref := &Reference{ + Definition: value, + Name: name, + Node: node, + Path: p, + } + + // add to raw sequenced refs + index.rawSequencedRefs = append(index.rawSequencedRefs, ref) + + // add ref by line number + refNameIndex := strings.LastIndex(value, "/") + refName := value[refNameIndex+1:] + if 
len(index.refsByLine[refName]) > 0 { + index.refsByLine[refName][n.Line] = true + } else { + v := make(map[int]bool) + v[n.Line] = true + index.refsByLine[refName] = v + } + + // if this ref value has any siblings (node.Content is larger than two elements) + // then add to refs with siblings + if len(node.Content) > 2 { + copiedNode := *node + copied := Reference{ + Definition: ref.Definition, + Name: ref.Name, + Node: &copiedNode, + Path: p, + } + // protect this data using a copy, prevent the resolver from destroying things. + index.refsWithSiblings[value] = copied + } + + // if this is a polymorphic reference, we're going to leave it out + // allRefs. We don't ever want these resolved, so instead of polluting + // the timeline, we will keep each poly ref in its own collection for later + // analysis. + if poly { + index.polymorphicRefs[value] = ref + + // index each type + switch pName { + case "anyOf": + index.polymorphicAnyOfRefs = append(index.polymorphicAnyOfRefs, ref) + case "allOf": + index.polymorphicAllOfRefs = append(index.polymorphicAllOfRefs, ref) + case "oneOf": + index.polymorphicOneOfRefs = append(index.polymorphicOneOfRefs, ref) + } + continue + } + + // check if this is a dupe, if so, skip it, we don't care now. + if index.allRefs[value] != nil { // seen before, skip. + continue + } + + if value == "" { + + completedPath := fmt.Sprintf("$.%s", strings.Join(fp, ".")) + + indexError := &IndexingError{ + Err: errors.New("schema reference is empty and cannot be processed"), + Node: node.Content[i+1], + Path: completedPath, + } + + index.refErrors = append(index.refErrors, indexError) + + continue + } + + index.allRefs[value] = ref + found = append(found, ref) + } + + if i%2 == 0 && n.Value != "$ref" && n.Value != "" { + + nodePath := fmt.Sprintf("$.%s", strings.Join(seenPath, ".")) + + // capture descriptions and summaries + if n.Value == "description" { + + // if the parent is a sequence, ignore. 
+ if utils.IsNodeArray(node) { + continue + } + + ref := &DescriptionReference{ + Content: node.Content[i+1].Value, + Path: nodePath, + Node: node.Content[i+1], + IsSummary: false, + } + + index.allDescriptions = append(index.allDescriptions, ref) + index.descriptionCount++ + } + + if n.Value == "summary" { + + var b *yaml.Node + if len(node.Content) == i+1 { + b = node.Content[i] + } else { + b = node.Content[i+1] + } + ref := &DescriptionReference{ + Content: b.Value, + Path: nodePath, + Node: b, + IsSummary: true, + } + + index.allSummaries = append(index.allSummaries, ref) + index.summaryCount++ + } + + // capture security requirement references (these are not traditional references, but they + // are used as a look-up. This is the only exception to the design. + if n.Value == "security" { + var b *yaml.Node + if len(node.Content) == i+1 { + b = node.Content[i] + } else { + b = node.Content[i+1] + } + if utils.IsNodeArray(b) { + var secKey string + for k := range b.Content { + if utils.IsNodeMap(b.Content[k]) { + for g := range b.Content[k].Content { + if g%2 == 0 { + secKey = b.Content[k].Content[g].Value + continue + } + if utils.IsNodeArray(b.Content[k].Content[g]) { + var refMap map[string][]*Reference + if index.securityRequirementRefs[secKey] == nil { + index.securityRequirementRefs[secKey] = make(map[string][]*Reference) + refMap = index.securityRequirementRefs[secKey] + } else { + refMap = index.securityRequirementRefs[secKey] + } + for r := range b.Content[k].Content[g].Content { + var refs []*Reference + if refMap[b.Content[k].Content[g].Content[r].Value] != nil { + refs = refMap[b.Content[k].Content[g].Content[r].Value] + } + + refs = append(refs, &Reference{ + Definition: b.Content[k].Content[g].Content[r].Value, + Path: fmt.Sprintf("%s.security[%d].%s[%d]", nodePath, k, secKey, r), + Node: b.Content[k].Content[g].Content[r], + }) + + index.securityRequirementRefs[secKey][b.Content[k].Content[g].Content[r].Value] = refs + } + } + } + } + } + } + } + 
// capture enums + if n.Value == "enum" { + + // all enums need to have a type, extract the type from the node where the enum was found. + _, enumKeyValueNode := utils.FindKeyNodeTop("type", node.Content) + + if enumKeyValueNode != nil { + ref := &EnumReference{ + Path: nodePath, + Node: node.Content[i+1], + Type: enumKeyValueNode, + SchemaNode: node, + ParentNode: parent, + } + + index.allEnums = append(index.allEnums, ref) + index.enumCount++ + } + } + // capture all objects with properties + if n.Value == "properties" { + _, typeKeyValueNode := utils.FindKeyNodeTop("type", node.Content) + + if typeKeyValueNode != nil { + isObject := false + + if typeKeyValueNode.Value == "object" { + isObject = true + } + + for _, v := range typeKeyValueNode.Content { + if v.Value == "object" { + isObject = true + } + } + + if isObject { + index.allObjectsWithProperties = append(index.allObjectsWithProperties, &ObjectReference{ + Path: nodePath, + Node: node, + ParentNode: parent, + }) + } + } + } + + seenPath = append(seenPath, n.Value) + prev = n.Value + } + + // if next node is map, don't add segment. 
+ if i < len(node.Content)-1 { + next := node.Content[i+1] + + if i%2 != 0 && next != nil && !utils.IsNodeArray(next) && !utils.IsNodeMap(next) { + seenPath = seenPath[:len(seenPath)-1] + } + } + } + if len(seenPath) > 0 { + seenPath = seenPath[:len(seenPath)-1] + } + + } + if len(seenPath) > 0 { + seenPath = seenPath[:len(seenPath)-1] + } + + index.refCount = len(index.allRefs) + + return found +} diff --git a/index/find_component.go b/index/find_component.go new file mode 100644 index 0000000..0bd141e --- /dev/null +++ b/index/find_component.go @@ -0,0 +1,278 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package index + +import ( + "fmt" + "github.com/pb33f/libopenapi/utils" + "github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath" + "gopkg.in/yaml.v3" + "io/ioutil" + "strings" +) + +// FindComponent will locate a component by its reference, returns nil if nothing is found. +// This method will recurse through remote, local and file references. For each new external reference +// a new index will be created. These indexes can then be traversed recursively. +func (index *SpecIndex) FindComponent(componentId string, parent *yaml.Node) *Reference { + if index.root == nil { + return nil + } + + remoteLookup := func(id string) (*yaml.Node, *yaml.Node, error) { + if index.config.AllowRemoteLookup { + return index.lookupRemoteReference(id) + } else { + return nil, nil, fmt.Errorf("remote lookups are not premitted, " + + "please set AllowRemoteLookup to true in the configuration") + } + } + + fileLookup := func(id string) (*yaml.Node, *yaml.Node, error) { + if index.config.AllowFileLookup { + return index.lookupFileReference(id) + } else { + return nil, nil, fmt.Errorf("local lookups are not permitted, " + + "please set AllowFileLookup to true in the configuration") + } + } + + switch DetermineReferenceResolveType(componentId) { + case LocalResolve: // ideally, every single ref in every single spec is local. 
however, this is not the case. + return index.FindComponentInRoot(componentId) + + case HttpResolve: + uri := strings.Split(componentId, "#") + if len(uri) >= 2 { + return index.performExternalLookup(uri, componentId, remoteLookup, parent) + } + if len(uri) == 1 { + // if there is no reference, second segment is empty / has no name + // this means there is no component to look-up and the entire file should be pulled in. + // to stop all the other code from breaking (that is expecting a component), let's just post-pend + // a hash to the end of the componentId and ensure the uri slice is as expected. + // described in https://github.com/pb33f/libopenapi/issues/37 + componentId = fmt.Sprintf("%s#", componentId) + uri = append(uri, "") + return index.performExternalLookup(uri, componentId, remoteLookup, parent) + } + + case FileResolve: + uri := strings.Split(componentId, "#") + if len(uri) == 2 { + return index.performExternalLookup(uri, componentId, fileLookup, parent) + } + if len(uri) == 1 { + // if there is no reference, second segment is empty / has no name + // this means there is no component to look-up and the entire file should be pulled in. + // to stop all the other code from breaking (that is expecting a component), let's just post-pend + // a hash to the end of the componentId and ensure the uri slice is as expected. + // described in https://github.com/pb33f/libopenapi/issues/37 + // + // ^^ this same issue was re-reported in file based lookups in vacuum. 
+ // more info here: https://github.com/daveshanley/vacuum/issues/225 + componentId = fmt.Sprintf("%s#", componentId) + uri = append(uri, "") + return index.performExternalLookup(uri, componentId, fileLookup, parent) + } + } + return nil +} + +func (index *SpecIndex) lookupRemoteReference(ref string) (*yaml.Node, *yaml.Node, error) { + // split string to remove file reference + uri := strings.Split(ref, "#") + + var parsedRemoteDocument *yaml.Node + if index.seenRemoteSources[uri[0]] != nil { + parsedRemoteDocument = index.seenRemoteSources[uri[0]] + } else { + index.httpLock.Lock() + resp, err := index.httpClient.Get(uri[0]) + index.httpLock.Unlock() + if err != nil { + return nil, nil, err + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, nil, err + } + + var remoteDoc yaml.Node + err = yaml.Unmarshal(body, &remoteDoc) + if err != nil { + return nil, nil, err + } + parsedRemoteDocument = &remoteDoc + index.remoteLock.Lock() + index.seenRemoteSources[uri[0]] = &remoteDoc + index.remoteLock.Unlock() + } + + // lookup item from reference by using a path query. 
+ var query string + if len(uri) >= 2 { + query = fmt.Sprintf("$%s", strings.ReplaceAll(uri[1], "/", ".")) + } else { + query = "$" + } + + // remove any URL encoding + query = strings.Replace(query, "~1", "./", 1) + query = strings.ReplaceAll(query, "~1", "/") + + path, err := yamlpath.NewPath(query) + if err != nil { + return nil, nil, err + } + result, _ := path.Find(parsedRemoteDocument) + if len(result) == 1 { + return result[0], parsedRemoteDocument, nil + } + return nil, nil, nil +} + +func (index *SpecIndex) lookupFileReference(ref string) (*yaml.Node, *yaml.Node, error) { + // split string to remove file reference + uri := strings.Split(ref, "#") + + file := strings.ReplaceAll(uri[0], "file:", "") + + var parsedRemoteDocument *yaml.Node + + if index.seenRemoteSources[file] != nil { + parsedRemoteDocument = index.seenRemoteSources[file] + + } else { + + // try and read the file off the local file system, if it fails + // check for a baseURL and then ask our remote lookup function to go try and get it. + // index.fileLock.Lock() + body, err := ioutil.ReadFile(file) + // index.fileLock.Unlock() + + if err != nil { + + // if we have a baseURL, then we can try and get the file from there. + if index.config != nil && index.config.BaseURL != nil { + + u := index.config.BaseURL + remoteRef := fmt.Sprintf("%s://%s%s/%s", u.Scheme, u.Host, u.Path, ref) + a, b, e := index.lookupRemoteReference(remoteRef) + if e != nil { + // give up, we can't find the file, not locally, not remotely. It's toast. + return nil, nil, e + } + + // everything looks good, lets just make sure we also add a key to the raw reference name. + if _, ok := index.seenRemoteSources[file]; !ok { + index.seenRemoteSources[file] = b + } + + return a, b, nil + + } else { + // no baseURL? then we can't do anything, give up. 
+ return nil, nil, err + } + } + + var remoteDoc yaml.Node + err = yaml.Unmarshal(body, &remoteDoc) + if err != nil { + return nil, nil, err + } + parsedRemoteDocument = &remoteDoc + index.seenRemoteSources[file] = &remoteDoc + } + + // lookup item from reference by using a path query. + var query string + if len(uri) >= 2 { + query = fmt.Sprintf("$%s", strings.ReplaceAll(uri[1], "/", ".")) + } else { + query = "$" + } + + // remove any URL encoding + query = strings.Replace(query, "~1", "./", 1) + query = strings.ReplaceAll(query, "~1", "/") + + path, err := yamlpath.NewPath(query) + if err != nil { + return nil, nil, err + } + result, _ := path.Find(parsedRemoteDocument) + if len(result) == 1 { + return result[0], parsedRemoteDocument, nil + } + + return nil, parsedRemoteDocument, nil +} + +func (index *SpecIndex) FindComponentInRoot(componentId string) *Reference { + if index.root != nil { + name, friendlySearch := utils.ConvertComponentIdIntoFriendlyPathSearch(componentId) + path, err := yamlpath.NewPath(friendlySearch) + if path == nil || err != nil { + return nil // no component found + } + res, _ := path.Find(index.root) + if len(res) == 1 { + ref := &Reference{ + Definition: componentId, + Name: name, + Node: res[0], + Path: friendlySearch, + RequiredRefProperties: index.extractDefinitionRequiredRefProperties(res[0], map[string][]string{}), + } + + return ref + } + } + return nil +} + +func (index *SpecIndex) performExternalLookup(uri []string, componentId string, + lookupFunction ExternalLookupFunction, parent *yaml.Node, +) *Reference { + if len(uri) > 0 { + externalSpecIndex := index.externalSpecIndex[uri[0]] + if externalSpecIndex == nil { + _, newRoot, err := lookupFunction(componentId) + if err != nil { + indexError := &IndexingError{ + Err: err, + Node: parent, + Path: componentId, + } + index.refErrors = append(index.refErrors, indexError) + return nil + } + + // cool, cool, lets index this spec also. 
This is a recursive action and will keep going + // until all remote references have been found. + newIndex := NewSpecIndexWithConfig(newRoot, index.config) + index.fileLock.Lock() + index.externalSpecIndex[uri[0]] = newIndex + index.fileLock.Unlock() + externalSpecIndex = newIndex + } + + foundRef := externalSpecIndex.FindComponentInRoot(uri[1]) + if foundRef != nil { + nameSegs := strings.Split(uri[1], "/") + ref := &Reference{ + Definition: componentId, + Name: nameSegs[len(nameSegs)-1], + Node: foundRef.Node, + IsRemote: true, + RemoteLocation: componentId, + Path: foundRef.Path, + } + return ref + } + } + return nil +} diff --git a/index/index_model.go b/index/index_model.go new file mode 100644 index 0000000..208716e --- /dev/null +++ b/index/index_model.go @@ -0,0 +1,218 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package index + +import ( + "gopkg.in/yaml.v3" + "net/http" + "net/url" + "sync" +) + +// Constants used to determine if resolving is local, file based or remote file based. +const ( + LocalResolve = iota + HttpResolve + FileResolve +) + +// Reference is a wrapper around *yaml.Node results to make things more manageable when performing +// algorithms on data models. the *yaml.Node def is just a bit too low level for tracking state. +type Reference struct { + Definition string + Name string + Node *yaml.Node + ParentNode *yaml.Node + Resolved bool + Circular bool + Seen bool + IsRemote bool + RemoteLocation string + Path string // this won't always be available. 
+ RequiredRefProperties map[string][]string // definition names (eg, #/definitions/One) to a list of required properties on this definition which reference that definition
+}
+
+// ReferenceMapped is a helper struct for mapped references put into sequence (we lose the key)
+type ReferenceMapped struct {
+ Reference *Reference
+ Definition string
+}
+
+// SpecIndexConfig is a configuration struct for the SpecIndex introduced in 0.6.0 that provides an expandable
+// set of granular options. The first being the ability to set the Base URL for resolving relative references, and
+// allowing or disallowing remote or local file lookups.
+// - https://github.com/pb33f/libopenapi/issues/73
+type SpecIndexConfig struct {
+ // The BaseURL will be the root from which relative references will be resolved from if they can't be found locally.
+ //
+ // For example:
+ // - $ref: somefile.yaml#/components/schemas/SomeSchema
+ //
+ // Might not be found locally, if the file was pulled in from a remote server (a good example is the DigitalOcean API),
+ // so by setting a BaseURL, the reference will try to be resolved from the remote server.
+ //
+ // If our baseURL is set to https://pb33f.io/libopenapi then our reference will try to be resolved from:
+ // - $ref: https://pb33f.io/libopenapi/somefile.yaml#/components/schemas/SomeSchema
+ //
+ // More details on relative references can be found in issue #73: https://github.com/pb33f/libopenapi/issues/73
+ BaseURL *url.URL // set the Base URL for resolving relative references if the spec is exploded.
+
+ // In an earlier version of libopenapi (pre 0.6.0) the index would automatically resolve all references.
+ // They could have been local, or they could have been remote. This was a problem because it meant
+ // there was a potential for a remote exploit if a remote reference was malicious. There aren't any known
+ // exploits, but it's better to be safe than sorry. 
+ // + // To read more about this, you can find a discussion here: https://github.com/pb33f/libopenapi/pull/64 + AllowRemoteLookup bool // Allow remote lookups for references. Defaults to false + AllowFileLookup bool // Allow file lookups for references. Defaults to false +} + +// SpecIndex is a complete pre-computed index of the entire specification. Numbers are pre-calculated and +// quick direct access to paths, operations, tags are all available. No need to walk the entire node tree in rules, +// everything is pre-walked if you need it. +type SpecIndex struct { + allRefs map[string]*Reference // all (deduplicated) refs + rawSequencedRefs []*Reference // all raw references in sequence as they are scanned, not deduped. + linesWithRefs map[int]bool // lines that link to references. + allMappedRefs map[string]*Reference // these are the located mapped refs + allMappedRefsSequenced []*ReferenceMapped // sequenced mapped refs + refsByLine map[string]map[int]bool // every reference and the lines it's referenced from + pathRefs map[string]map[string]*Reference // all path references + paramOpRefs map[string]map[string]map[string]*Reference // params in operations. 
+ paramCompRefs map[string]*Reference // params in components
+ paramAllRefs map[string]*Reference // combined components and ops
+ paramInlineDuplicates map[string][]*Reference // inline params all with the same name
+ globalTagRefs map[string]*Reference // top level global tags
+ securitySchemeRefs map[string]*Reference // top level security schemes
+ requestBodiesRefs map[string]*Reference // top level request bodies
+ responsesRefs map[string]*Reference // top level responses
+ headersRefs map[string]*Reference // top level headers
+ examplesRefs map[string]*Reference // top level examples
+ securityRequirementRefs map[string]map[string][]*Reference // (NOT $ref) but a name based lookup for requirements
+ callbacksRefs map[string]map[string][]*Reference // all callbacks
+ linksRefs map[string]map[string][]*Reference // all links
+ operationTagsRefs map[string]map[string][]*Reference // tags found in operations
+ operationDescriptionRefs map[string]map[string]*Reference // descriptions in operations.
+ operationSummaryRefs map[string]map[string]*Reference // summaries in operations
+ callbackRefs map[string]*Reference // top level callback refs
+ serversRefs []*Reference // all top level server refs
+ rootServersNode *yaml.Node // servers root node
+ opServersRefs map[string]map[string][]*Reference // all operation level server overrides.
+ polymorphicRefs map[string]*Reference // every reference to a polymorphic ref
+ polymorphicAllOfRefs []*Reference // every reference to 'allOf' references
+ polymorphicOneOfRefs []*Reference // every reference to 'oneOf' references
+ polymorphicAnyOfRefs []*Reference // every reference to 'anyOf' references
+ externalDocumentsRef []*Reference // all external documents in spec
+ rootSecurity []*Reference // root security definitions.
+ rootSecurityNode *yaml.Node // root security node. 
+ refsWithSiblings map[string]Reference // references with sibling elements next to them + pathRefsLock sync.Mutex // create lock for all refs maps, we want to build data as fast as we can + externalDocumentsCount int // number of externalDocument nodes found + operationTagsCount int // number of unique tags in operations + globalTagsCount int // number of global tags defined + totalTagsCount int // number unique tags in spec + securitySchemesCount int // security schemes + globalRequestBodiesCount int // component request bodies + globalResponsesCount int // component responses + globalHeadersCount int // component headers + globalExamplesCount int // component examples + globalLinksCount int // component links + globalCallbacksCount int // component callbacks + globalCallbacks int // component callbacks. + pathCount int // number of paths + operationCount int // number of operations + operationParamCount int // number of params defined in operations + componentParamCount int // number of params defined in components + componentsInlineParamUniqueCount int // number of inline params with unique names + componentsInlineParamDuplicateCount int // number of inline params with duplicate names + schemaCount int // number of schemas + refCount int // total ref count + root *yaml.Node // the root document + pathsNode *yaml.Node // paths node + tagsNode *yaml.Node // tags node + componentsNode *yaml.Node // components node + parametersNode *yaml.Node // components/parameters node + allParametersNode map[string]*Reference // all parameters node + allParameters map[string]*Reference // all parameters (components/defs) + schemasNode *yaml.Node // components/schemas node + allInlineSchemaDefinitions []*Reference // all schemas found in document outside of components (openapi) or definitions (swagger). + allInlineSchemaObjectDefinitions []*Reference // all schemas that are objects found in document outside of components (openapi) or definitions (swagger). 
+ allComponentSchemaDefinitions map[string]*Reference // all schemas found in components (openapi) or definitions (swagger).
+ securitySchemesNode *yaml.Node // components/securitySchemes node
+ allSecuritySchemes map[string]*Reference // all security schemes / definitions.
+ requestBodiesNode *yaml.Node // components/requestBodies node
+ allRequestBodies map[string]*Reference // all request bodies
+ responsesNode *yaml.Node // components/responses node
+ allResponses map[string]*Reference // all responses
+ headersNode *yaml.Node // components/headers node
+ allHeaders map[string]*Reference // all headers
+ examplesNode *yaml.Node // components/examples node
+ allExamples map[string]*Reference // all components examples
+ linksNode *yaml.Node // components/links node
+ allLinks map[string]*Reference // all links
+ callbacksNode *yaml.Node // components/callbacks node
+ allCallbacks map[string]*Reference // all components callbacks
+ externalDocumentsNode *yaml.Node // external documents node
+ allExternalDocuments map[string]*Reference // all external documents
+ externalSpecIndex map[string]*SpecIndex // create a primary index of all external specs and componentIds
+ refErrors []error // errors when indexing references
+ operationParamErrors []error // errors when indexing parameters
+ allDescriptions []*DescriptionReference // every single description found in the spec.
+ allSummaries []*DescriptionReference // every single summary found in the spec.
+ allEnums []*EnumReference // every single enum found in the spec.
+ allObjectsWithProperties []*ObjectReference // every single object with properties found in the spec.
+ enumCount int
+ descriptionCount int
+ summaryCount int
+ seenRemoteSources map[string]*yaml.Node
+ remoteLock sync.Mutex
+ httpLock sync.Mutex
+ fileLock sync.Mutex
+ refLock sync.Mutex
+ circularReferences []*CircularReferenceResult // only available when the resolver has been used. 
+ allowCircularReferences bool // decide if you want to error out, or allow circular references, default is false. + config *SpecIndexConfig // configuration for the index + httpClient *http.Client + componentIndexChan chan bool + polyComponentIndexChan chan bool +} + +// ExternalLookupFunction is for lookup functions that take a JSONSchema reference and tries to find that node in the +// URI based document. Decides if the reference is local, remote or in a file. +type ExternalLookupFunction func(id string) (foundNode *yaml.Node, rootNode *yaml.Node, lookupError error) + +// IndexingError holds data about something that went wrong during indexing. +type IndexingError struct { + Err error + Node *yaml.Node + Path string +} + +func (i *IndexingError) Error() string { + return i.Err.Error() +} + +// DescriptionReference holds data about a description that was found and where it was found. +type DescriptionReference struct { + Content string + Path string + Node *yaml.Node + IsSummary bool +} + +type EnumReference struct { + Node *yaml.Node + Type *yaml.Node + Path string + SchemaNode *yaml.Node + ParentNode *yaml.Node +} + +type ObjectReference struct { + Node *yaml.Node + Path string + ParentNode *yaml.Node +} + +var methodTypes = []string{"get", "post", "put", "patch", "options", "head", "delete"} diff --git a/index/index_utils.go b/index/index_utils.go new file mode 100644 index 0000000..7d3d7c4 --- /dev/null +++ b/index/index_utils.go @@ -0,0 +1,90 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package index + +import ( + "gopkg.in/yaml.v3" + "net/http" + "strings" + "time" +) + +func isHttpMethod(val string) bool { + switch strings.ToLower(val) { + case methodTypes[0]: + return true + case methodTypes[1]: + return true + case methodTypes[2]: + return true + case methodTypes[3]: + return true + case methodTypes[4]: + return true + case methodTypes[5]: + return true + case methodTypes[6]: + return true + } + 
return false +} + +func DetermineReferenceResolveType(ref string) int { + if ref != "" && ref[0] == '#' { + return LocalResolve + } + if ref != "" && len(ref) >= 5 && (ref[:5] == "https" || ref[:5] == "http:") { + return HttpResolve + } + if strings.Contains(ref, ".json") || + strings.Contains(ref, ".yaml") || + strings.Contains(ref, ".yml") { + return FileResolve + } + return -1 +} + +func boostrapIndexCollections(rootNode *yaml.Node, index *SpecIndex) { + index.root = rootNode + index.allRefs = make(map[string]*Reference) + index.allMappedRefs = make(map[string]*Reference) + index.refsByLine = make(map[string]map[int]bool) + index.linesWithRefs = make(map[int]bool) + index.pathRefs = make(map[string]map[string]*Reference) + index.paramOpRefs = make(map[string]map[string]map[string]*Reference) + index.operationTagsRefs = make(map[string]map[string][]*Reference) + index.operationDescriptionRefs = make(map[string]map[string]*Reference) + index.operationSummaryRefs = make(map[string]map[string]*Reference) + index.paramCompRefs = make(map[string]*Reference) + index.paramAllRefs = make(map[string]*Reference) + index.paramInlineDuplicates = make(map[string][]*Reference) + index.globalTagRefs = make(map[string]*Reference) + index.securitySchemeRefs = make(map[string]*Reference) + index.requestBodiesRefs = make(map[string]*Reference) + index.responsesRefs = make(map[string]*Reference) + index.headersRefs = make(map[string]*Reference) + index.examplesRefs = make(map[string]*Reference) + index.callbacksRefs = make(map[string]map[string][]*Reference) + index.linksRefs = make(map[string]map[string][]*Reference) + index.callbackRefs = make(map[string]*Reference) + index.externalSpecIndex = make(map[string]*SpecIndex) + index.allComponentSchemaDefinitions = make(map[string]*Reference) + index.allParameters = make(map[string]*Reference) + index.allSecuritySchemes = make(map[string]*Reference) + index.allRequestBodies = make(map[string]*Reference) + index.allResponses = 
make(map[string]*Reference) + index.allHeaders = make(map[string]*Reference) + index.allExamples = make(map[string]*Reference) + index.allLinks = make(map[string]*Reference) + index.allCallbacks = make(map[string]*Reference) + index.allExternalDocuments = make(map[string]*Reference) + index.securityRequirementRefs = make(map[string]map[string][]*Reference) + index.polymorphicRefs = make(map[string]*Reference) + index.refsWithSiblings = make(map[string]Reference) + index.seenRemoteSources = make(map[string]*yaml.Node) + index.opServersRefs = make(map[string]map[string][]*Reference) + index.httpClient = &http.Client{Timeout: time.Duration(5) * time.Second} + index.componentIndexChan = make(chan bool) + index.polyComponentIndexChan = make(chan bool) +} diff --git a/index/spec_index.go b/index/spec_index.go index 637d674..66d4a56 100644 --- a/index/spec_index.go +++ b/index/spec_index.go @@ -13,238 +13,44 @@ package index import ( - "errors" "fmt" - "io/ioutil" - "net/http" - "strings" - "sync" - "github.com/pb33f/libopenapi/utils" "github.com/vmware-labs/yaml-jsonpath/pkg/yamlpath" "gopkg.in/yaml.v3" + "strings" + "sync" ) -// Constants used to determine if resolving is local, file based or remote file based. -const ( - LocalResolve = iota - HttpResolve - FileResolve -) - -// Reference is a wrapper around *yaml.Node results to make things more manageable when performing -// algorithms on data models. the *yaml.Node def is just a bit too low level for tracking state. -type Reference struct { - Definition string - Name string - Node *yaml.Node - ParentNode *yaml.Node - Resolved bool - Circular bool - Seen bool - IsRemote bool - RemoteLocation string - Path string // this won't always be available. 
- RequiredRefProperties map[string][]string // definition names (eg, #/definitions/One) to a list of required properties on this definition which reference that definition -} - -// ReferenceMapped is a helper struct for mapped references put into sequence (we lose the key) -type ReferenceMapped struct { - Reference *Reference - Definition string -} - -// SpecIndex is a complete pre-computed index of the entire specification. Numbers are pre-calculated and -// quick direct access to paths, operations, tags are all available. No need to walk the entire node tree in rules, -// everything is pre-walked if you need it. -type SpecIndex struct { - allRefs map[string]*Reference // all (deduplicated) refs - rawSequencedRefs []*Reference // all raw references in sequence as they are scanned, not deduped. - linesWithRefs map[int]bool // lines that link to references. - allMappedRefs map[string]*Reference // these are the located mapped refs - allMappedRefsSequenced []*ReferenceMapped // sequenced mapped refs - refsByLine map[string]map[int]bool // every reference and the lines it's referenced from - pathRefs map[string]map[string]*Reference // all path references - paramOpRefs map[string]map[string]map[string]*Reference // params in operations. 
- paramCompRefs map[string]*Reference // params in components - paramAllRefs map[string]*Reference // combined components and ops - paramInlineDuplicates map[string][]*Reference // inline params all with the same name - globalTagRefs map[string]*Reference // top level global tags - securitySchemeRefs map[string]*Reference // top level security schemes - requestBodiesRefs map[string]*Reference // top level request bodies - responsesRefs map[string]*Reference // top level responses - headersRefs map[string]*Reference // top level responses - examplesRefs map[string]*Reference // top level examples - securityRequirementRefs map[string]map[string][]*Reference // (NOT $ref) but a name based lookup for requirements - callbacksRefs map[string]map[string][]*Reference // all links - linksRefs map[string]map[string][]*Reference // all callbacks - operationTagsRefs map[string]map[string][]*Reference // tags found in operations - operationDescriptionRefs map[string]map[string]*Reference // descriptions in operations. - operationSummaryRefs map[string]map[string]*Reference // summaries in operations - callbackRefs map[string]*Reference // top level callback refs - serversRefs []*Reference // all top level server refs - rootServersNode *yaml.Node // servers root node - opServersRefs map[string]map[string][]*Reference // all operation level server overrides. - polymorphicRefs map[string]*Reference // every reference to a polymorphic ref - polymorphicAllOfRefs []*Reference // every reference to 'allOf' references - polymorphicOneOfRefs []*Reference // every reference to 'oneOf' references - polymorphicAnyOfRefs []*Reference // every reference to 'anyOf' references - externalDocumentsRef []*Reference // all external documents in spec - rootSecurity []*Reference // root security definitions. - rootSecurityNode *yaml.Node // root security node. 
- refsWithSiblings map[string]Reference // references with sibling elements next to them - pathRefsLock sync.Mutex // create lock for all refs maps, we want to build data as fast as we can - externalDocumentsCount int // number of externalDocument nodes found - operationTagsCount int // number of unique tags in operations - globalTagsCount int // number of global tags defined - totalTagsCount int // number unique tags in spec - securitySchemesCount int // security schemes - globalRequestBodiesCount int // component request bodies - globalResponsesCount int // component responses - globalHeadersCount int // component headers - globalExamplesCount int // component examples - globalLinksCount int // component links - globalCallbacksCount int // component callbacks - globalCallbacks int // component callbacks. - pathCount int // number of paths - operationCount int // number of operations - operationParamCount int // number of params defined in operations - componentParamCount int // number of params defined in components - componentsInlineParamUniqueCount int // number of inline params with unique names - componentsInlineParamDuplicateCount int // number of inline params with duplicate names - schemaCount int // number of schemas - refCount int // total ref count - root *yaml.Node // the root document - pathsNode *yaml.Node // paths node - tagsNode *yaml.Node // tags node - componentsNode *yaml.Node // components node - parametersNode *yaml.Node // components/parameters node - allParametersNode map[string]*Reference // all parameters node - allParameters map[string]*Reference // all parameters (components/defs) - schemasNode *yaml.Node // components/schemas node - allInlineSchemaDefinitions []*Reference // all schemas found in document outside of components (openapi) or definitions (swagger). - allInlineSchemaObjectDefinitions []*Reference // all schemas that are objects found in document outside of components (openapi) or definitions (swagger). 
- allComponentSchemaDefinitions map[string]*Reference // all schemas found in components (openapi) or definitions (swagger). - securitySchemesNode *yaml.Node // components/securitySchemes node - allSecuritySchemes map[string]*Reference // all security schemes / definitions. - requestBodiesNode *yaml.Node // components/requestBodies node - allRequestBodies map[string]*Reference // all request bodies - responsesNode *yaml.Node // components/responses node - allResponses map[string]*Reference // all responses - headersNode *yaml.Node // components/headers node - allHeaders map[string]*Reference // all headers - examplesNode *yaml.Node // components/examples node - allExamples map[string]*Reference // all components examples - linksNode *yaml.Node // components/links node - allLinks map[string]*Reference // all links - callbacksNode *yaml.Node // components/callbacks node - allCallbacks map[string]*Reference // all components examples - externalDocumentsNode *yaml.Node // external documents node - allExternalDocuments map[string]*Reference // all external documents - externalSpecIndex map[string]*SpecIndex // create a primary index of all external specs and componentIds - refErrors []*IndexingError // errors when indexing references - operationParamErrors []*IndexingError // errors when indexing parameters - allDescriptions []*DescriptionReference // every single description found in the spec. - allSummaries []*DescriptionReference // every single summary found in the spec. - allEnums []*EnumReference // every single enum found in the spec. - allObjectsWithProperties []*ObjectReference // every single object with properties found in the spec. - enumCount int - descriptionCount int - summaryCount int - seenRemoteSources map[string]*yaml.Node - remoteLock sync.Mutex - circularReferences []*CircularReferenceResult // only available when the resolver has been used. 
- allowCircularReferences bool // decide if you want to error out, or allow circular references, default is false. -} - -// ExternalLookupFunction is for lookup functions that take a JSONSchema reference and tries to find that node in the -// URI based document. Decides if the reference is local, remote or in a file. -type ExternalLookupFunction func(id string) (foundNode *yaml.Node, rootNode *yaml.Node, lookupError error) - -// IndexingError holds data about something that went wrong during indexing. -type IndexingError struct { - Error error - Node *yaml.Node - Path string -} - -// DescriptionReference holds data about a description that was found and where it was found. -type DescriptionReference struct { - Content string - Path string - Node *yaml.Node - IsSummary bool -} - -type EnumReference struct { - Node *yaml.Node - Type *yaml.Node - Path string - SchemaNode *yaml.Node - ParentNode *yaml.Node -} - -type ObjectReference struct { - Node *yaml.Node - Path string - ParentNode *yaml.Node -} - -var methodTypes = []string{"get", "post", "put", "patch", "options", "head", "delete"} - -func runIndexFunction(funcs []func() int, wg *sync.WaitGroup) { - for _, cFunc := range funcs { - go func(wg *sync.WaitGroup, cf func() int) { - cf() - wg.Done() - }(wg, cFunc) - } +// NewSpecIndexWithConfig will create a new index of an OpenAPI or Swagger spec. It uses the same logic as NewSpecIndex +// except it sets a base URL for resolving relative references, except it also allows for granular control over +// how the index is set up. +func NewSpecIndexWithConfig(rootNode *yaml.Node, config *SpecIndexConfig) *SpecIndex { + index := new(SpecIndex) + index.config = config + boostrapIndexCollections(rootNode, index) + return createNewIndex(rootNode, index) } // NewSpecIndex will create a new index of an OpenAPI or Swagger spec. It's not resolved or converted into anything // other than a raw index of every node for every content type in the specification. 
This process runs as fast as // possible so dependencies looking through the tree, don't need to walk the entire thing over, and over. +// +// Deprecated: Use NewSpecIndexWithConfig instead, this function will be removed in the future because it +// defaults to allowing remote references and file references. This is a potential security risk and should be controlled by +// providing a SpecIndexConfig that explicitly sets the AllowRemoteLookup and AllowFileLookup to true. +// This function also does not support specifications with relative references that may not exist locally. +// - https://github.com/pb33f/libopenapi/issues/73 func NewSpecIndex(rootNode *yaml.Node) *SpecIndex { index := new(SpecIndex) - index.root = rootNode - index.allRefs = make(map[string]*Reference) - index.allMappedRefs = make(map[string]*Reference) - index.refsByLine = make(map[string]map[int]bool) - index.linesWithRefs = make(map[int]bool) - index.pathRefs = make(map[string]map[string]*Reference) - index.paramOpRefs = make(map[string]map[string]map[string]*Reference) - index.operationTagsRefs = make(map[string]map[string][]*Reference) - index.operationDescriptionRefs = make(map[string]map[string]*Reference) - index.operationSummaryRefs = make(map[string]map[string]*Reference) - index.paramCompRefs = make(map[string]*Reference) - index.paramAllRefs = make(map[string]*Reference) - index.paramInlineDuplicates = make(map[string][]*Reference) - index.globalTagRefs = make(map[string]*Reference) - index.securitySchemeRefs = make(map[string]*Reference) - index.requestBodiesRefs = make(map[string]*Reference) - index.responsesRefs = make(map[string]*Reference) - index.headersRefs = make(map[string]*Reference) - index.examplesRefs = make(map[string]*Reference) - index.callbacksRefs = make(map[string]map[string][]*Reference) - index.linksRefs = make(map[string]map[string][]*Reference) - index.callbackRefs = make(map[string]*Reference) - index.externalSpecIndex = make(map[string]*SpecIndex) - 
index.allComponentSchemaDefinitions = make(map[string]*Reference) - index.allParameters = make(map[string]*Reference) - index.allSecuritySchemes = make(map[string]*Reference) - index.allRequestBodies = make(map[string]*Reference) - index.allResponses = make(map[string]*Reference) - index.allHeaders = make(map[string]*Reference) - index.allExamples = make(map[string]*Reference) - index.allLinks = make(map[string]*Reference) - index.allCallbacks = make(map[string]*Reference) - index.allExternalDocuments = make(map[string]*Reference) - index.securityRequirementRefs = make(map[string]map[string][]*Reference) - index.polymorphicRefs = make(map[string]*Reference) - index.refsWithSiblings = make(map[string]Reference) - index.seenRemoteSources = make(map[string]*yaml.Node) - index.opServersRefs = make(map[string]map[string][]*Reference) + index.config = &SpecIndexConfig{ + AllowRemoteLookup: true, + AllowFileLookup: true, + } + boostrapIndexCollections(rootNode, index) + return createNewIndex(rootNode, index) +} +func createNewIndex(rootNode *yaml.Node, index *SpecIndex) *SpecIndex { // there is no node! return an empty index. if rootNode == nil { return index @@ -253,9 +59,6 @@ func NewSpecIndex(rootNode *yaml.Node) *SpecIndex { // boot index. 
results := index.ExtractRefs(index.root.Content[0], index.root, []string{}, 0, false, "") - // pull out references - index.ExtractComponentsFromRefs(results) - // map poly refs poly := make([]*Reference, len(index.polymorphicRefs)) z := 0 @@ -263,6 +66,9 @@ func NewSpecIndex(rootNode *yaml.Node) *SpecIndex { poly[z] = index.polymorphicRefs[i] z++ } + + // pull out references + index.ExtractComponentsFromRefs(results) index.ExtractComponentsFromRefs(poly) index.ExtractExternalDocuments(index.root) @@ -526,12 +332,12 @@ func (index *SpecIndex) GetParametersNode() *yaml.Node { } // GetReferenceIndexErrors will return any errors that occurred when indexing references -func (index *SpecIndex) GetReferenceIndexErrors() []*IndexingError { +func (index *SpecIndex) GetReferenceIndexErrors() []error { return index.refErrors } // GetOperationParametersIndexErrors any errors that occurred when indexing operation parameters -func (index *SpecIndex) GetOperationParametersIndexErrors() []*IndexingError { +func (index *SpecIndex) GetOperationParametersIndexErrors() []error { return index.operationParamErrors } @@ -608,313 +414,7 @@ func (index *SpecIndex) checkPolymorphicNode(name string) (bool, string) { return false, "" } -// ExtractRefs will return a deduplicated slice of references for every unique ref found in the document. -// The total number of refs, will generally be much higher, you can extract those from GetRawReferenceCount() -func (index *SpecIndex) ExtractRefs(node, parent *yaml.Node, seenPath []string, level int, poly bool, pName string) []*Reference { - if node == nil { - return nil - } - var found []*Reference - if len(node.Content) > 0 { - var prev, polyName string - for i, n := range node.Content { - if utils.IsNodeMap(n) || utils.IsNodeArray(n) { - level++ - // check if we're using polymorphic values. These tend to create rabbit warrens of circular - // references if every single link is followed. We don't resolve polymorphic values. 
- isPoly, _ := index.checkPolymorphicNode(prev) - polyName = pName - if isPoly { - poly = true - if prev != "" { - polyName = prev - } - } - found = append(found, index.ExtractRefs(n, node, seenPath, level, poly, polyName)...) - } - - // check if we're dealing with an inline schema definition, that isn't part of an array - // (which means it's being used as a value in an array, and it's not a label) - // https://github.com/pb33f/libopenapi/issues/76 - if i%2 == 0 && n.Value == "schema" && !utils.IsNodeArray(node) && (i+1 < len(node.Content)) { - isRef, _, _ := utils.IsNodeRefValue(node.Content[i+1]) - if isRef { - continue - } - ref := &Reference{ - Node: node.Content[i+1], - Path: fmt.Sprintf("$.%s", strings.Join(seenPath, ".")), - } - index.allInlineSchemaDefinitions = append(index.allInlineSchemaDefinitions, ref) - - // check if the schema is an object or an array, - // and if so, add it to the list of inline schema object definitions. - k, v := utils.FindKeyNodeTop("type", node.Content[i+1].Content) - if k != nil && v != nil { - if v.Value == "object" || v.Value == "array" { - index.allInlineSchemaObjectDefinitions = append(index.allInlineSchemaObjectDefinitions, ref) - } - } - } - - if i%2 == 0 && n.Value == "$ref" { - - // only look at scalar values, not maps (looking at you k8s) - if !utils.IsNodeStringValue(node.Content[i+1]) { - continue - } - - index.linesWithRefs[n.Line] = true - - fp := make([]string, len(seenPath)) - for x, foundPathNode := range seenPath { - fp[x] = foundPathNode - } - - value := node.Content[i+1].Value - - segs := strings.Split(value, "/") - name := segs[len(segs)-1] - _, p := utils.ConvertComponentIdIntoFriendlyPathSearch(value) - ref := &Reference{ - Definition: value, - Name: name, - Node: node, - Path: p, - } - - // add to raw sequenced refs - index.rawSequencedRefs = append(index.rawSequencedRefs, ref) - - // add ref by line number - refNameIndex := strings.LastIndex(value, "/") - refName := value[refNameIndex+1:] - if 
len(index.refsByLine[refName]) > 0 { - index.refsByLine[refName][n.Line] = true - } else { - v := make(map[int]bool) - v[n.Line] = true - index.refsByLine[refName] = v - } - - // if this ref value has any siblings (node.Content is larger than two elements) - // then add to refs with siblings - if len(node.Content) > 2 { - copiedNode := *node - copied := Reference{ - Definition: ref.Definition, - Name: ref.Name, - Node: &copiedNode, - Path: p, - } - // protect this data using a copy, prevent the resolver from destroying things. - index.refsWithSiblings[value] = copied - } - - // if this is a polymorphic reference, we're going to leave it out - // allRefs. We don't ever want these resolved, so instead of polluting - // the timeline, we will keep each poly ref in its own collection for later - // analysis. - if poly { - index.polymorphicRefs[value] = ref - - // index each type - switch pName { - case "anyOf": - index.polymorphicAnyOfRefs = append(index.polymorphicAnyOfRefs, ref) - case "allOf": - index.polymorphicAllOfRefs = append(index.polymorphicAllOfRefs, ref) - case "oneOf": - index.polymorphicOneOfRefs = append(index.polymorphicOneOfRefs, ref) - } - continue - } - - // check if this is a dupe, if so, skip it, we don't care now. - if index.allRefs[value] != nil { // seen before, skip. - continue - } - - if value == "" { - - completedPath := fmt.Sprintf("$.%s", strings.Join(fp, ".")) - - indexError := &IndexingError{ - Error: errors.New("schema reference is empty and cannot be processed"), - Node: node.Content[i+1], - Path: completedPath, - } - - index.refErrors = append(index.refErrors, indexError) - - continue - } - - index.allRefs[value] = ref - found = append(found, ref) - } - - if i%2 == 0 && n.Value != "$ref" && n.Value != "" { - - nodePath := fmt.Sprintf("$.%s", strings.Join(seenPath, ".")) - - // capture descriptions and summaries - if n.Value == "description" { - - // if the parent is a sequence, ignore. 
- if utils.IsNodeArray(node) { - continue - } - - ref := &DescriptionReference{ - Content: node.Content[i+1].Value, - Path: nodePath, - Node: node.Content[i+1], - IsSummary: false, - } - - index.allDescriptions = append(index.allDescriptions, ref) - index.descriptionCount++ - } - - if n.Value == "summary" { - - var b *yaml.Node - if len(node.Content) == i+1 { - b = node.Content[i] - } else { - b = node.Content[i+1] - } - ref := &DescriptionReference{ - Content: b.Value, - Path: nodePath, - Node: b, - IsSummary: true, - } - - index.allSummaries = append(index.allSummaries, ref) - index.summaryCount++ - } - - // capture security requirement references (these are not traditional references, but they - // are used as a look-up. This is the only exception to the design. - if n.Value == "security" { - var b *yaml.Node - if len(node.Content) == i+1 { - b = node.Content[i] - } else { - b = node.Content[i+1] - } - if utils.IsNodeArray(b) { - var secKey string - for k := range b.Content { - if utils.IsNodeMap(b.Content[k]) { - for g := range b.Content[k].Content { - if g%2 == 0 { - secKey = b.Content[k].Content[g].Value - continue - } - if utils.IsNodeArray(b.Content[k].Content[g]) { - var refMap map[string][]*Reference - if index.securityRequirementRefs[secKey] == nil { - index.securityRequirementRefs[secKey] = make(map[string][]*Reference) - refMap = index.securityRequirementRefs[secKey] - } else { - refMap = index.securityRequirementRefs[secKey] - } - for r := range b.Content[k].Content[g].Content { - var refs []*Reference - if refMap[b.Content[k].Content[g].Content[r].Value] != nil { - refs = refMap[b.Content[k].Content[g].Content[r].Value] - } - - refs = append(refs, &Reference{ - Definition: b.Content[k].Content[g].Content[r].Value, - Path: fmt.Sprintf("%s.security[%d].%s[%d]", nodePath, k, secKey, r), - Node: b.Content[k].Content[g].Content[r], - }) - - index.securityRequirementRefs[secKey][b.Content[k].Content[g].Content[r].Value] = refs - } - } - } - } - } - } - } - 
// capture enums - if n.Value == "enum" { - - // all enums need to have a type, extract the type from the node where the enum was found. - _, enumKeyValueNode := utils.FindKeyNodeTop("type", node.Content) - - if enumKeyValueNode != nil { - ref := &EnumReference{ - Path: nodePath, - Node: node.Content[i+1], - Type: enumKeyValueNode, - SchemaNode: node, - ParentNode: parent, - } - - index.allEnums = append(index.allEnums, ref) - index.enumCount++ - } - } - // capture all objects with properties - if n.Value == "properties" { - _, typeKeyValueNode := utils.FindKeyNodeTop("type", node.Content) - - if typeKeyValueNode != nil { - isObject := false - - if typeKeyValueNode.Value == "object" { - isObject = true - } - - for _, v := range typeKeyValueNode.Content { - if v.Value == "object" { - isObject = true - } - } - - if isObject { - index.allObjectsWithProperties = append(index.allObjectsWithProperties, &ObjectReference{ - Path: nodePath, - Node: node, - ParentNode: parent, - }) - } - } - } - - seenPath = append(seenPath, n.Value) - prev = n.Value - } - - // if next node is map, don't add segment. - if i < len(node.Content)-1 { - next := node.Content[i+1] - - if i%2 != 0 && next != nil && !utils.IsNodeArray(next) && !utils.IsNodeMap(next) { - seenPath = seenPath[:len(seenPath)-1] - } - } - } - if len(seenPath) > 0 { - seenPath = seenPath[:len(seenPath)-1] - } - - } - if len(seenPath) > 0 { - seenPath = seenPath[:len(seenPath)-1] - } - - index.refCount = len(index.allRefs) - - return found -} // GetPathCount will return the number of paths found in the spec func (index *SpecIndex) GetPathCount() int { @@ -1623,108 +1123,6 @@ func (index *SpecIndex) GetInlineUniqueParamCount() int { return index.countUniqueInlineDuplicates() } -// ExtractComponentsFromRefs returns located components from references. The returned nodes from here -// can be used for resolving as they contain the actual object properties. 
-func (index *SpecIndex) ExtractComponentsFromRefs(refs []*Reference) []*Reference { - var found []*Reference - for _, ref := range refs { - - // check reference for backslashes (hah yeah seen this too!) - if strings.Contains(ref.Definition, "\\") { // this was from blazemeter.com haha! - _, path := utils.ConvertComponentIdIntoFriendlyPathSearch(ref.Definition) - indexError := &IndexingError{ - Error: fmt.Errorf("component '%s' contains a backslash '\\'. It's not valid", ref.Definition), - Node: ref.Node, - Path: path, - } - index.refErrors = append(index.refErrors, indexError) - continue - } - - located := index.FindComponent(ref.Definition, ref.Node) - if located != nil { - if index.allMappedRefs[ref.Definition] == nil { - found = append(found, located) - index.allMappedRefs[ref.Definition] = located - index.allMappedRefsSequenced = append(index.allMappedRefsSequenced, &ReferenceMapped{ - Reference: located, - Definition: ref.Definition, - }) - } - } else { - - _, path := utils.ConvertComponentIdIntoFriendlyPathSearch(ref.Definition) - indexError := &IndexingError{ - Error: fmt.Errorf("component '%s' does not exist in the specification", ref.Definition), - Node: ref.Node, - Path: path, - } - index.refErrors = append(index.refErrors, indexError) - } - } - return found -} - -// FindComponent will locate a component by its reference, returns nil if nothing is found. -// This method will recurse through remote, local and file references. For each new external reference -// a new index will be created. These indexes can then be traversed recursively. 
-func (index *SpecIndex) FindComponent(componentId string, parent *yaml.Node) *Reference { - if index.root == nil { - return nil - } - - // FIXME: This is a potential security hole, and needs to be made optional (see log4j fiasco) - remoteLookup := func(id string) (*yaml.Node, *yaml.Node, error) { - return index.lookupRemoteReference(id) - } - - // FIXME: As above - fileLookup := func(id string) (*yaml.Node, *yaml.Node, error) { - return index.lookupFileReference(id) - } - - switch DetermineReferenceResolveType(componentId) { - case LocalResolve: // ideally, every single ref in every single spec is local. however, this is not the case. - return index.FindComponentInRoot(componentId) - - case HttpResolve: - uri := strings.Split(componentId, "#") - if len(uri) >= 2 { - return index.performExternalLookup(uri, componentId, remoteLookup, parent) - } - if len(uri) == 1 { - // if there is no reference, second segment is empty / has no name - // this means there is no component to look-up and the entire file should be pulled in. - // to stop all the other code from breaking (that is expecting a component), let's just post-pend - // a hash to the end of the componentId and ensure the uri slice is as expected. - // described in https://github.com/pb33f/libopenapi/issues/37 - componentId = fmt.Sprintf("%s#", componentId) - uri = append(uri, "") - return index.performExternalLookup(uri, componentId, remoteLookup, parent) - } - - case FileResolve: - uri := strings.Split(componentId, "#") - if len(uri) == 2 { - return index.performExternalLookup(uri, componentId, fileLookup, parent) - } - if len(uri) == 1 { - // if there is no reference, second segment is empty / has no name - // this means there is no component to look-up and the entire file should be pulled in. - // to stop all the other code from breaking (that is expecting a component), let's just post-pend - // a hash to the end of the componentId and ensure the uri slice is as expected. 
- // described in https://github.com/pb33f/libopenapi/issues/37 - // - // ^^ this same issue was re-reported in file based lookups in vacuum. - // more info here: https://github.com/daveshanley/vacuum/issues/225 - componentId = fmt.Sprintf("%s#", componentId) - uri = append(uri, "") - return index.performExternalLookup(uri, componentId, fileLookup, parent) - } - } - return nil -} - // GetAllDescriptionsCount will collect together every single description found in the document func (index *SpecIndex) GetAllDescriptionsCount() int { return len(index.allDescriptions) @@ -1734,556 +1132,3 @@ func (index *SpecIndex) GetAllDescriptionsCount() int { func (index *SpecIndex) GetAllSummariesCount() int { return len(index.allSummaries) } - -func DetermineReferenceResolveType(ref string) int { - if ref != "" && ref[0] == '#' { - return LocalResolve - } - if ref != "" && len(ref) >= 5 && (ref[:5] == "https" || ref[:5] == "http:") { - return HttpResolve - } - if strings.Contains(ref, ".json") || - strings.Contains(ref, ".yaml") || - strings.Contains(ref, ".yml") { - return FileResolve - } - return -1 -} - -/* private */ - -func (index *SpecIndex) extractDefinitionsAndSchemas(schemasNode *yaml.Node, pathPrefix string) { - var name string - for i, schema := range schemasNode.Content { - if i%2 == 0 { - name = schema.Value - continue - } - - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: schema, - Path: fmt.Sprintf("$.components.schemas.%s", name), - ParentNode: schemasNode, - RequiredRefProperties: index.extractDefinitionRequiredRefProperties(schemasNode, map[string][]string{}), - } - index.allComponentSchemaDefinitions[def] = ref - } -} - -// extractDefinitionRequiredRefProperties goes through the direct properties of a schema and extracts the map of required definitions from within it -func (index *SpecIndex) extractDefinitionRequiredRefProperties(schemaNode *yaml.Node, reqRefProps map[string][]string) 
map[string][]string { - if schemaNode == nil { - return reqRefProps - } - - // If the node we're looking at is a direct ref to another model without any properties, mark it as required, but still continue to look for required properties - isRef, _, defPath := utils.IsNodeRefValue(schemaNode) - if isRef { - if _, ok := reqRefProps[defPath]; !ok { - reqRefProps[defPath] = []string{} - } - } - - // Check for a required parameters list, and return if none exists, as any properties will be optional - _, requiredSeqNode := utils.FindKeyNodeTop("required", schemaNode.Content) - if requiredSeqNode == nil { - return reqRefProps - } - - _, propertiesMapNode := utils.FindKeyNodeTop("properties", schemaNode.Content) - if propertiesMapNode == nil { - // TODO: Log a warning on the resolver, because if you have required properties, but no actual properties, something is wrong - return reqRefProps - } - - name := "" - for i, param := range propertiesMapNode.Content { - if i%2 == 0 { - name = param.Value - continue - } - - // Check to see if the current property is directly embedded within the current schema, and handle its properties if so - _, paramPropertiesMapNode := utils.FindKeyNodeTop("properties", param.Content) - if paramPropertiesMapNode != nil { - reqRefProps = index.extractDefinitionRequiredRefProperties(param, reqRefProps) - } - - // Check to see if the current property is polymorphic, and dive into that model if so - for _, key := range []string{"allOf", "oneOf", "anyOf"} { - _, ofNode := utils.FindKeyNodeTop(key, param.Content) - if ofNode != nil { - for _, ofNodeItem := range ofNode.Content { - reqRefProps = index.extractRequiredReferenceProperties(ofNodeItem, name, reqRefProps) - } - } - } - } - - // Run through each of the required properties and extract _their_ required references - for _, requiredPropertyNode := range requiredSeqNode.Content { - _, requiredPropDefNode := utils.FindKeyNodeTop(requiredPropertyNode.Value, propertiesMapNode.Content) - if 
requiredPropDefNode == nil { - continue - } - - reqRefProps = index.extractRequiredReferenceProperties(requiredPropDefNode, requiredPropertyNode.Value, reqRefProps) - } - - return reqRefProps -} - -// extractRequiredReferenceProperties returns a map of definition names to the property or properties which reference it within a node -func (index *SpecIndex) extractRequiredReferenceProperties(requiredPropDefNode *yaml.Node, propName string, reqRefProps map[string][]string) map[string][]string { - isRef, _, defPath := utils.IsNodeRefValue(requiredPropDefNode) - if !isRef { - _, defItems := utils.FindKeyNodeTop("items", requiredPropDefNode.Content) - if defItems != nil { - isRef, _, defPath = utils.IsNodeRefValue(defItems) - } - } - - if /* still */ !isRef { - return reqRefProps - } - - if _, ok := reqRefProps[defPath]; !ok { - reqRefProps[defPath] = []string{} - } - reqRefProps[defPath] = append(reqRefProps[defPath], propName) - - return reqRefProps -} - -func (index *SpecIndex) extractComponentParameters(paramsNode *yaml.Node, pathPrefix string) { - var name string - for i, param := range paramsNode.Content { - if i%2 == 0 { - name = param.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: param, - } - index.allParameters[def] = ref - } -} - -func (index *SpecIndex) extractComponentRequestBodies(requestBodiesNode *yaml.Node, pathPrefix string) { - var name string - for i, reqBod := range requestBodiesNode.Content { - if i%2 == 0 { - name = reqBod.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: reqBod, - } - index.allRequestBodies[def] = ref - } -} - -func (index *SpecIndex) extractComponentResponses(responsesNode *yaml.Node, pathPrefix string) { - var name string - for i, response := range responsesNode.Content { - if i%2 == 0 { - name = response.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, 
name) - ref := &Reference{ - Definition: def, - Name: name, - Node: response, - } - index.allResponses[def] = ref - } -} - -func (index *SpecIndex) extractComponentHeaders(headersNode *yaml.Node, pathPrefix string) { - var name string - for i, header := range headersNode.Content { - if i%2 == 0 { - name = header.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: header, - } - index.allHeaders[def] = ref - } -} - -func (index *SpecIndex) extractComponentCallbacks(callbacksNode *yaml.Node, pathPrefix string) { - var name string - for i, callback := range callbacksNode.Content { - if i%2 == 0 { - name = callback.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: callback, - } - index.allCallbacks[def] = ref - } -} - -func (index *SpecIndex) extractComponentLinks(linksNode *yaml.Node, pathPrefix string) { - var name string - for i, link := range linksNode.Content { - if i%2 == 0 { - name = link.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: link, - } - index.allLinks[def] = ref - } -} - -func (index *SpecIndex) extractComponentExamples(examplesNode *yaml.Node, pathPrefix string) { - var name string - for i, example := range examplesNode.Content { - if i%2 == 0 { - name = example.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: example, - } - index.allExamples[def] = ref - } -} - -func (index *SpecIndex) extractComponentSecuritySchemes(securitySchemesNode *yaml.Node, pathPrefix string) { - var name string - for i, secScheme := range securitySchemesNode.Content { - if i%2 == 0 { - name = secScheme.Value - continue - } - def := fmt.Sprintf("%s%s", pathPrefix, name) - ref := &Reference{ - Definition: def, - Name: name, - Node: secScheme, - ParentNode: 
securitySchemesNode, - Path: fmt.Sprintf("$.components.securitySchemes.%s", name), - } - index.allSecuritySchemes[def] = ref - } -} - -func (index *SpecIndex) performExternalLookup(uri []string, componentId string, - lookupFunction ExternalLookupFunction, parent *yaml.Node, -) *Reference { - if len(uri) > 0 { - externalSpecIndex := index.externalSpecIndex[uri[0]] - if externalSpecIndex == nil { - _, newRoot, err := lookupFunction(componentId) - if err != nil { - indexError := &IndexingError{ - Error: err, - Node: parent, - Path: componentId, - } - index.refErrors = append(index.refErrors, indexError) - return nil - } - - // cool, cool, lets index this spec also. This is a recursive action and will keep going - // until all remote references have been found. - newIndex := NewSpecIndex(newRoot) - index.externalSpecIndex[uri[0]] = newIndex - externalSpecIndex = newIndex - } - - foundRef := externalSpecIndex.FindComponentInRoot(uri[1]) - if foundRef != nil { - nameSegs := strings.Split(uri[1], "/") - ref := &Reference{ - Definition: componentId, - Name: nameSegs[len(nameSegs)-1], - Node: foundRef.Node, - IsRemote: true, - RemoteLocation: componentId, - Path: foundRef.Path, - } - return ref - } - } - return nil -} - -func (index *SpecIndex) FindComponentInRoot(componentId string) *Reference { - if index.root != nil { - name, friendlySearch := utils.ConvertComponentIdIntoFriendlyPathSearch(componentId) - path, err := yamlpath.NewPath(friendlySearch) - if path == nil || err != nil { - return nil // no component found - } - res, _ := path.Find(index.root) - if len(res) == 1 { - ref := &Reference{ - Definition: componentId, - Name: name, - Node: res[0], - Path: friendlySearch, - RequiredRefProperties: index.extractDefinitionRequiredRefProperties(res[0], map[string][]string{}), - } - - return ref - } - } - return nil -} - -func (index *SpecIndex) countUniqueInlineDuplicates() int { - if index.componentsInlineParamUniqueCount > 0 { - return 
index.componentsInlineParamUniqueCount - } - unique := 0 - for _, p := range index.paramInlineDuplicates { - if len(p) == 1 { - unique++ - } - } - index.componentsInlineParamUniqueCount = unique - return unique -} - -func (index *SpecIndex) scanOperationParams(params []*yaml.Node, pathItemNode *yaml.Node, method string) { - for i, param := range params { - // param is ref - if len(param.Content) > 0 && param.Content[0].Value == "$ref" { - - paramRefName := param.Content[1].Value - paramRef := index.allMappedRefs[paramRefName] - - if index.paramOpRefs[pathItemNode.Value] == nil { - index.paramOpRefs[pathItemNode.Value] = make(map[string]map[string]*Reference) - index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) - - } - // if we know the path, but it's a new method - if index.paramOpRefs[pathItemNode.Value][method] == nil { - index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) - } - - // if this is a duplicate, add an error and ignore it - if index.paramOpRefs[pathItemNode.Value][method][paramRefName] != nil { - path := fmt.Sprintf("$.paths.%s.%s.parameters[%d]", pathItemNode.Value, method, i) - if method == "top" { - path = fmt.Sprintf("$.paths.%s.parameters[%d]", pathItemNode.Value, i) - } - - index.operationParamErrors = append(index.operationParamErrors, &IndexingError{ - Error: fmt.Errorf("the `%s` operation parameter at path `%s`, "+ - "index %d has a duplicate ref `%s`", method, pathItemNode.Value, i, paramRefName), - Node: param, - Path: path, - }) - } else { - index.paramOpRefs[pathItemNode.Value][method][paramRefName] = paramRef - } - - continue - - } else { - - // param is inline. 
- _, vn := utils.FindKeyNode("name", param.Content) - - path := fmt.Sprintf("$.paths.%s.%s.parameters[%d]", pathItemNode.Value, method, i) - if method == "top" { - path = fmt.Sprintf("$.paths.%s.parameters[%d]", pathItemNode.Value, i) - } - - if vn == nil { - index.operationParamErrors = append(index.operationParamErrors, &IndexingError{ - Error: fmt.Errorf("the '%s' operation parameter at path '%s', index %d has no 'name' value", - method, pathItemNode.Value, i), - Node: param, - Path: path, - }) - continue - } - - ref := &Reference{ - Definition: vn.Value, - Name: vn.Value, - Node: param, - Path: path, - } - if index.paramOpRefs[pathItemNode.Value] == nil { - index.paramOpRefs[pathItemNode.Value] = make(map[string]map[string]*Reference) - index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) - } - - // if we know the path but this is a new method. - if index.paramOpRefs[pathItemNode.Value][method] == nil { - index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) - } - - // if this is a duplicate, add an error and ignore it - if index.paramOpRefs[pathItemNode.Value][method][ref.Name] != nil { - path := fmt.Sprintf("$.paths.%s.%s.parameters[%d]", pathItemNode.Value, method, i) - if method == "top" { - path = fmt.Sprintf("$.paths.%s.parameters[%d]", pathItemNode.Value, i) - } - - index.operationParamErrors = append(index.operationParamErrors, &IndexingError{ - Error: fmt.Errorf("the `%s` operation parameter at path `%s`, "+ - "index %d has a duplicate name `%s`", method, pathItemNode.Value, i, vn.Value), - Node: param, - Path: path, - }) - } else { - index.paramOpRefs[pathItemNode.Value][method][ref.Name] = ref - } - continue - } - } -} - -func isHttpMethod(val string) bool { - switch strings.ToLower(val) { - case methodTypes[0]: - return true - case methodTypes[1]: - return true - case methodTypes[2]: - return true - case methodTypes[3]: - return true - case methodTypes[4]: - return true - case methodTypes[5]: - return 
true - case methodTypes[6]: - return true - } - return false -} - -func (index *SpecIndex) lookupRemoteReference(ref string) (*yaml.Node, *yaml.Node, error) { - // split string to remove file reference - uri := strings.Split(ref, "#") - - var parsedRemoteDocument *yaml.Node - if index.seenRemoteSources[uri[0]] != nil { - parsedRemoteDocument = index.seenRemoteSources[uri[0]] - } else { - resp, err := http.Get(uri[0]) - if err != nil { - return nil, nil, err - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, nil, err - } - - var remoteDoc yaml.Node - err = yaml.Unmarshal(body, &remoteDoc) - if err != nil { - return nil, nil, err - } - parsedRemoteDocument = &remoteDoc - index.remoteLock.Lock() - index.seenRemoteSources[uri[0]] = &remoteDoc - index.remoteLock.Unlock() - } - - // lookup item from reference by using a path query. - var query string - if len(uri) >= 2 { - query = fmt.Sprintf("$%s", strings.ReplaceAll(uri[1], "/", ".")) - } else { - query = "$" - } - - // remove any URL encoding - query = strings.Replace(query, "~1", "./", 1) - query = strings.ReplaceAll(query, "~1", "/") - - path, err := yamlpath.NewPath(query) - if err != nil { - return nil, nil, err - } - result, _ := path.Find(parsedRemoteDocument) - if len(result) == 1 { - return result[0], parsedRemoteDocument, nil - } - return nil, nil, nil -} - -func (index *SpecIndex) lookupFileReference(ref string) (*yaml.Node, *yaml.Node, error) { - // split string to remove file reference - uri := strings.Split(ref, "#") - - file := strings.ReplaceAll(uri[0], "file:", "") - - var parsedRemoteDocument *yaml.Node - if index.seenRemoteSources[file] != nil { - parsedRemoteDocument = index.seenRemoteSources[file] - } else { - - body, err := ioutil.ReadFile(file) - if err != nil { - return nil, nil, err - } - - var remoteDoc yaml.Node - err = yaml.Unmarshal(body, &remoteDoc) - if err != nil { - return nil, nil, err - } - parsedRemoteDocument = &remoteDoc - index.seenRemoteSources[file] = 
&remoteDoc - } - - // lookup item from reference by using a path query. - var query string - if len(uri) >= 2 { - query = fmt.Sprintf("$%s", strings.ReplaceAll(uri[1], "/", ".")) - } else { - query = "$" - } - - // remove any URL encoding - query = strings.Replace(query, "~1", "./", 1) - query = strings.ReplaceAll(query, "~1", "/") - - path, err := yamlpath.NewPath(query) - if err != nil { - return nil, nil, err - } - result, _ := path.Find(parsedRemoteDocument) - if len(result) == 1 { - return result[0], parsedRemoteDocument, nil - } - - return nil, parsedRemoteDocument, nil -} diff --git a/index/spec_index_test.go b/index/spec_index_test.go index 1aada27..82cd5bc 100644 --- a/index/spec_index_test.go +++ b/index/spec_index_test.go @@ -6,6 +6,7 @@ package index import ( "fmt" "io/ioutil" + "net/url" "os" "testing" @@ -81,6 +82,55 @@ func TestSpecIndex_Asana(t *testing.T) { assert.Equal(t, 69, index.componentsInlineParamUniqueCount) } +func TestSpecIndex_DigitalOcean(t *testing.T) { + asana, _ := ioutil.ReadFile("../test_specs/digitalocean.yaml") + var rootNode yaml.Node + yaml.Unmarshal(asana, &rootNode) + + baseURL, _ := url.Parse("https://raw.githubusercontent.com/digitalocean/openapi/main/specification") + index := NewSpecIndexWithConfig(&rootNode, &SpecIndexConfig{ + BaseURL: baseURL, + AllowRemoteLookup: true, + AllowFileLookup: true, + }) + + assert.Len(t, index.GetAllExternalIndexes(), 291) + assert.NotNil(t, index) +} + +func TestSpecIndex_DigitalOcean_LookupsNotAllowed(t *testing.T) { + asana, _ := ioutil.ReadFile("../test_specs/digitalocean.yaml") + var rootNode yaml.Node + yaml.Unmarshal(asana, &rootNode) + + baseURL, _ := url.Parse("https://raw.githubusercontent.com/digitalocean/openapi/main/specification") + index := NewSpecIndexWithConfig(&rootNode, &SpecIndexConfig{ + BaseURL: baseURL, + }) + + // no lookups allowed, bits have not been set, so there should just be a bunch of errors. 
+ assert.Len(t, index.GetAllExternalIndexes(), 0) + assert.True(t, len(index.GetReferenceIndexErrors()) > 0) +} + +func TestSpecIndex_BaseURLError(t *testing.T) { + asana, _ := ioutil.ReadFile("../test_specs/digitalocean.yaml") + var rootNode yaml.Node + yaml.Unmarshal(asana, &rootNode) + + // this should fail because the base url is not a valid url and digital ocean won't be able to resolve + // anything. + baseURL, _ := url.Parse("https://githerbs.com/fresh/herbs/for/you") + index := NewSpecIndexWithConfig(&rootNode, &SpecIndexConfig{ + BaseURL: baseURL, + AllowRemoteLookup: true, + AllowFileLookup: true, + }) + + assert.Len(t, index.GetAllExternalIndexes(), 0) + assert.Len(t, index.GetReferenceIndexErrors(), 582) +} + func TestSpecIndex_k8s(t *testing.T) { asana, _ := ioutil.ReadFile("../test_specs/k8s.json") var rootNode yaml.Node @@ -591,7 +641,9 @@ paths: } func TestSpecIndex_lookupRemoteReference_SeenSourceSimulation_BadJSON(t *testing.T) { - index := new(SpecIndex) + index := NewSpecIndexWithConfig(nil, &SpecIndexConfig{ + AllowRemoteLookup: true, + }) index.seenRemoteSources = make(map[string]*yaml.Node) a, b, err := index.lookupRemoteReference("https://google.com//logos/doodles/2022/labor-day-2022-6753651837109490.3-l.png#/hey") assert.Error(t, err) diff --git a/index/utility_methods.go b/index/utility_methods.go new file mode 100644 index 0000000..3e9d275 --- /dev/null +++ b/index/utility_methods.go @@ -0,0 +1,372 @@ +// Copyright 2023 Princess B33f Heavy Industries / Dave Shanley +// SPDX-License-Identifier: MIT + +package index + +import ( + "fmt" + "github.com/pb33f/libopenapi/utils" + "gopkg.in/yaml.v3" + "sync" +) + +func (index *SpecIndex) extractDefinitionsAndSchemas(schemasNode *yaml.Node, pathPrefix string) { + var name string + for i, schema := range schemasNode.Content { + if i%2 == 0 { + name = schema.Value + continue + } + + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: schema, + 
Path: fmt.Sprintf("$.components.schemas.%s", name), + ParentNode: schemasNode, + RequiredRefProperties: index.extractDefinitionRequiredRefProperties(schemasNode, map[string][]string{}), + } + index.allComponentSchemaDefinitions[def] = ref + } +} + +// extractDefinitionRequiredRefProperties goes through the direct properties of a schema and extracts the map of required definitions from within it +func (index *SpecIndex) extractDefinitionRequiredRefProperties(schemaNode *yaml.Node, reqRefProps map[string][]string) map[string][]string { + if schemaNode == nil { + return reqRefProps + } + + // If the node we're looking at is a direct ref to another model without any properties, mark it as required, but still continue to look for required properties + isRef, _, defPath := utils.IsNodeRefValue(schemaNode) + if isRef { + if _, ok := reqRefProps[defPath]; !ok { + reqRefProps[defPath] = []string{} + } + } + + // Check for a required parameters list, and return if none exists, as any properties will be optional + _, requiredSeqNode := utils.FindKeyNodeTop("required", schemaNode.Content) + if requiredSeqNode == nil { + return reqRefProps + } + + _, propertiesMapNode := utils.FindKeyNodeTop("properties", schemaNode.Content) + if propertiesMapNode == nil { + // TODO: Log a warning on the resolver, because if you have required properties, but no actual properties, something is wrong + return reqRefProps + } + + name := "" + for i, param := range propertiesMapNode.Content { + if i%2 == 0 { + name = param.Value + continue + } + + // Check to see if the current property is directly embedded within the current schema, and handle its properties if so + _, paramPropertiesMapNode := utils.FindKeyNodeTop("properties", param.Content) + if paramPropertiesMapNode != nil { + reqRefProps = index.extractDefinitionRequiredRefProperties(param, reqRefProps) + } + + // Check to see if the current property is polymorphic, and dive into that model if so + for _, key := range []string{"allOf", 
"oneOf", "anyOf"} { + _, ofNode := utils.FindKeyNodeTop(key, param.Content) + if ofNode != nil { + for _, ofNodeItem := range ofNode.Content { + reqRefProps = index.extractRequiredReferenceProperties(ofNodeItem, name, reqRefProps) + } + } + } + } + + // Run through each of the required properties and extract _their_ required references + for _, requiredPropertyNode := range requiredSeqNode.Content { + _, requiredPropDefNode := utils.FindKeyNodeTop(requiredPropertyNode.Value, propertiesMapNode.Content) + if requiredPropDefNode == nil { + continue + } + + reqRefProps = index.extractRequiredReferenceProperties(requiredPropDefNode, requiredPropertyNode.Value, reqRefProps) + } + + return reqRefProps +} + +// extractRequiredReferenceProperties returns a map of definition names to the property or properties which reference it within a node +func (index *SpecIndex) extractRequiredReferenceProperties(requiredPropDefNode *yaml.Node, propName string, reqRefProps map[string][]string) map[string][]string { + isRef, _, defPath := utils.IsNodeRefValue(requiredPropDefNode) + if !isRef { + _, defItems := utils.FindKeyNodeTop("items", requiredPropDefNode.Content) + if defItems != nil { + isRef, _, defPath = utils.IsNodeRefValue(defItems) + } + } + + if /* still */ !isRef { + return reqRefProps + } + + if _, ok := reqRefProps[defPath]; !ok { + reqRefProps[defPath] = []string{} + } + reqRefProps[defPath] = append(reqRefProps[defPath], propName) + + return reqRefProps +} + +func (index *SpecIndex) extractComponentParameters(paramsNode *yaml.Node, pathPrefix string) { + var name string + for i, param := range paramsNode.Content { + if i%2 == 0 { + name = param.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: param, + } + index.allParameters[def] = ref + } +} + +func (index *SpecIndex) extractComponentRequestBodies(requestBodiesNode *yaml.Node, pathPrefix string) { + var name string + for i, reqBod := range 
requestBodiesNode.Content { + if i%2 == 0 { + name = reqBod.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: reqBod, + } + index.allRequestBodies[def] = ref + } +} + +func (index *SpecIndex) extractComponentResponses(responsesNode *yaml.Node, pathPrefix string) { + var name string + for i, response := range responsesNode.Content { + if i%2 == 0 { + name = response.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: response, + } + index.allResponses[def] = ref + } +} + +func (index *SpecIndex) extractComponentHeaders(headersNode *yaml.Node, pathPrefix string) { + var name string + for i, header := range headersNode.Content { + if i%2 == 0 { + name = header.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: header, + } + index.allHeaders[def] = ref + } +} + +func (index *SpecIndex) extractComponentCallbacks(callbacksNode *yaml.Node, pathPrefix string) { + var name string + for i, callback := range callbacksNode.Content { + if i%2 == 0 { + name = callback.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: callback, + } + index.allCallbacks[def] = ref + } +} + +func (index *SpecIndex) extractComponentLinks(linksNode *yaml.Node, pathPrefix string) { + var name string + for i, link := range linksNode.Content { + if i%2 == 0 { + name = link.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: link, + } + index.allLinks[def] = ref + } +} + +func (index *SpecIndex) extractComponentExamples(examplesNode *yaml.Node, pathPrefix string) { + var name string + for i, example := range examplesNode.Content { + if i%2 == 0 { + name = example.Value + continue + } + def := fmt.Sprintf("%s%s", 
pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: example, + } + index.allExamples[def] = ref + } +} + +func (index *SpecIndex) extractComponentSecuritySchemes(securitySchemesNode *yaml.Node, pathPrefix string) { + var name string + for i, secScheme := range securitySchemesNode.Content { + if i%2 == 0 { + name = secScheme.Value + continue + } + def := fmt.Sprintf("%s%s", pathPrefix, name) + ref := &Reference{ + Definition: def, + Name: name, + Node: secScheme, + ParentNode: securitySchemesNode, + Path: fmt.Sprintf("$.components.securitySchemes.%s", name), + } + index.allSecuritySchemes[def] = ref + } +} + +func (index *SpecIndex) countUniqueInlineDuplicates() int { + if index.componentsInlineParamUniqueCount > 0 { + return index.componentsInlineParamUniqueCount + } + unique := 0 + for _, p := range index.paramInlineDuplicates { + if len(p) == 1 { + unique++ + } + } + index.componentsInlineParamUniqueCount = unique + return unique +} + +func (index *SpecIndex) scanOperationParams(params []*yaml.Node, pathItemNode *yaml.Node, method string) { + for i, param := range params { + // param is ref + if len(param.Content) > 0 && param.Content[0].Value == "$ref" { + + paramRefName := param.Content[1].Value + paramRef := index.allMappedRefs[paramRefName] + + if index.paramOpRefs[pathItemNode.Value] == nil { + index.paramOpRefs[pathItemNode.Value] = make(map[string]map[string]*Reference) + index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) + + } + // if we know the path, but it's a new method + if index.paramOpRefs[pathItemNode.Value][method] == nil { + index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) + } + + // if this is a duplicate, add an error and ignore it + if index.paramOpRefs[pathItemNode.Value][method][paramRefName] != nil { + path := fmt.Sprintf("$.paths.%s.%s.parameters[%d]", pathItemNode.Value, method, i) + if method == "top" { + path = fmt.Sprintf("$.paths.%s.parameters[%d]", 
pathItemNode.Value, i) + } + + index.operationParamErrors = append(index.operationParamErrors, &IndexingError{ + Err: fmt.Errorf("the `%s` operation parameter at path `%s`, "+ + "index %d has a duplicate ref `%s`", method, pathItemNode.Value, i, paramRefName), + Node: param, + Path: path, + }) + } else { + index.paramOpRefs[pathItemNode.Value][method][paramRefName] = paramRef + } + + continue + + } else { + + // param is inline. + _, vn := utils.FindKeyNode("name", param.Content) + + path := fmt.Sprintf("$.paths.%s.%s.parameters[%d]", pathItemNode.Value, method, i) + if method == "top" { + path = fmt.Sprintf("$.paths.%s.parameters[%d]", pathItemNode.Value, i) + } + + if vn == nil { + index.operationParamErrors = append(index.operationParamErrors, &IndexingError{ + Err: fmt.Errorf("the '%s' operation parameter at path '%s', index %d has no 'name' value", + method, pathItemNode.Value, i), + Node: param, + Path: path, + }) + continue + } + + ref := &Reference{ + Definition: vn.Value, + Name: vn.Value, + Node: param, + Path: path, + } + if index.paramOpRefs[pathItemNode.Value] == nil { + index.paramOpRefs[pathItemNode.Value] = make(map[string]map[string]*Reference) + index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) + } + + // if we know the path but this is a new method. 
+ if index.paramOpRefs[pathItemNode.Value][method] == nil { + index.paramOpRefs[pathItemNode.Value][method] = make(map[string]*Reference) + } + + // if this is a duplicate, add an error and ignore it + if index.paramOpRefs[pathItemNode.Value][method][ref.Name] != nil { + path := fmt.Sprintf("$.paths.%s.%s.parameters[%d]", pathItemNode.Value, method, i) + if method == "top" { + path = fmt.Sprintf("$.paths.%s.parameters[%d]", pathItemNode.Value, i) + } + + index.operationParamErrors = append(index.operationParamErrors, &IndexingError{ + Err: fmt.Errorf("the `%s` operation parameter at path `%s`, "+ + "index %d has a duplicate name `%s`", method, pathItemNode.Value, i, vn.Value), + Node: param, + Path: path, + }) + } else { + index.paramOpRefs[pathItemNode.Value][method][ref.Name] = ref + } + continue + } + } +} + +func runIndexFunction(funcs []func() int, wg *sync.WaitGroup) { + for _, cFunc := range funcs { + go func(wg *sync.WaitGroup, cf func() int) { + cf() + wg.Done() + }(wg, cFunc) + } +} diff --git a/resolver/resolver.go b/resolver/resolver.go index 330575a..e4c380b 100644 --- a/resolver/resolver.go +++ b/resolver/resolver.go @@ -104,7 +104,9 @@ func (resolver *Resolver) Resolve() []*ResolvingError { for _, ref := range mapped { seenReferences := make(map[string]bool) var journey []*index.Reference - ref.Reference.Node.Content = resolver.VisitReference(ref.Reference, seenReferences, journey, true) + if ref != nil && ref.Reference != nil { + ref.Reference.Node.Content = resolver.VisitReference(ref.Reference, seenReferences, journey, true) + } } schemas := resolver.specIndex.GetAllComponentSchemas() diff --git a/test_specs/digitalocean.yaml b/test_specs/digitalocean.yaml new file mode 100644 index 0000000..36918f7 --- /dev/null +++ b/test_specs/digitalocean.yaml @@ -0,0 +1,1587 @@ +openapi: '3.0.0' + +info: + title: DigitalOcean API + version: '2.0' + description: + $ref: 'description.yml#/introduction' + + license: + name: Apache 2.0 + url: 
'https://www.apache.org/licenses/LICENSE-2.0.html' + contact: + name: DigitalOcean API Team + email: api-engineering@digitalocean.com + termsOfService: 'https://www.digitalocean.com/legal/terms-of-service-agreement/' + +servers: + - url: 'https://api.digitalocean.com' + description: production + +tags: + - name: 1-Click Applications + description: |- + 1-Click applications are pre-built Droplet images or Kubernetes apps with software, + features, and configuration details already set up for you. They can be found in the + [DigitalOcean Marketplace](https://www.digitalocean.com/docs/marketplace). + + - name: Account + description: Provides information about your current account. + + - name: Actions + description: |- + Actions are records of events that have occurred on the resources in your account. + These can be things like rebooting a Droplet, or transferring an image to a new region. + + An action object is created every time one of these actions is initiated. The action + object contains information about the current status of the action, start and complete + timestamps, and the associated resource type and ID. + + Every action that creates an action object is available through this endpoint. Completed + actions are not removed from this list and are always available for querying. + + **Note:** You can pass the following HTTP header with the request to have the API return + the `reserved_ips` stanza instead of the `floating_ips` stanza: + + - `Accept: application/vnd.digitalocean.reserveip+json` + + - name: Apps + description: |- + App Platform is a Platform-as-a-Service (PaaS) offering from DigitalOcean that allows + developers to publish code directly to DigitalOcean servers without worrying about the + underlying infrastructure. + + Most API operations are centered around a few core object types. Following are the + definitions of these types. These definitions will be omitted from the operation-specific + documentation. 
+
+      For documentation on app specifications (`AppSpec` objects), please refer to the
+      [product documentation](https://docs.digitalocean.com/products/app-platform/reference/app-spec/).
+
+  - name: Billing
+    description: |-
+      The billing endpoints allow you to retrieve your account balance, invoices
+      and billing history.
+
+      **Balance:** By sending requests to the `/v2/customers/my/balance` endpoint, you can
+      retrieve the balance information for the requested customer account.
+
+      **Invoices:** [Invoices](https://www.digitalocean.com/docs/accounts/billing/invoices/)
+      are generated on the first of each month for every DigitalOcean
+      customer. An invoice preview is generated daily, which can be accessed
+      with the `preview` keyword in place of `$INVOICE_UUID`. To interact with
+      invoices, you will generally send requests to the invoices endpoint at
+      `/v2/customers/my/invoices`.
+
+      **Billing History:** Billing history is a record of billing events for your account.
+      For example, entries may include events like payments made, invoices
+      issued, or credits granted. To interact with invoices, you
+      will generally send requests to the invoices endpoint at
+      `/v2/customers/my/billing_history`.
+
+  - name: Block Storage
+    description: |-
+      [DigitalOcean Block Storage Volumes](https://www.digitalocean.com/docs/volumes/)
+      provide expanded storage capacity for your Droplets and can be moved
+      between Droplets within a specific region.
+
+      Volumes function as raw block devices, meaning they appear to the
+      operating system as locally attached storage which can be formatted using
+      any file system supported by the OS. They may be created in sizes from
+      1GiB to 16TiB.
+
+      By sending requests to the `/v2/volumes` endpoint, you can list, create, or
+      delete volumes as well as attach and detach them from Droplets
+
+  - name: Block Storage Actions
+    description: |-
+      Block storage actions are commands that can be given to a DigitalOcean
+      Block Storage Volume. 
An example would be detaching or attaching a volume + from a Droplet. These requests are made on the + `/v2/volumes/$VOLUME_ID/actions` endpoint. + + An action object is returned. These objects hold the current status of the + requested action. + + - name: CDN Endpoints + description: |- + Content hosted in DigitalOcean's object storage solution, + [Spaces](https://www.digitalocean.com/docs/spaces/overview/), + can optionally be served by our globally distributed Content Delivery + Network (CDN). By sending requests to `/v2/cdn/endpoints`, you can list, + create, or delete CDN Endpoints as well as purge cached content. To use a + custom subdomain to access the CDN Endpoint, provide the ID of a + DigitalOcean managed TLS certificate and the fully qualified domain name + for the custom subdomain. + + - name: Certificates + description: |- + In order to perform SSL termination on load balancers, DigitalOcean offers + two types of [SSL certificate management](https://www.digitalocean.com/docs/accounts/security/#certificates): + + * **Custom**: User-generated certificates may be uploaded to DigitalOcean + where they will be placed in a fully encrypted and isolated storage system. + + * **Let's Encrypt**: Certificates may be automatically generated by + DigitalOcean utilizing an integration with Let's Encrypt, the free and + open certificate authority. These certificates will also be automatically + renewed as required. + + - name: Container Registry + description: |- + DigitalOcean offers the ability for you to create a + [private container registry](https://www.digitalocean.com/docs/images/container-registry/quickstart/) + to store your Docker images for use with your Kubernetes clusters. This + container registry runs inside the same datacenters as your cluster, + ensuring reliable and performant rollout of image deployments. + + You can only create one registry per DigitalOcean account, but you can use + that registry to create as many repositories as you wish. 
+ + - name: Databases + description: |- + DigitalOcean's [managed database service](https://www.digitalocean.com/docs/databases) + simplifies the creation and management of highly available database clusters. Currently, it + offers support for [PostgreSQL](http://www.digitalocean.com/docs/databases/postgresql/), + [Redis](https://www.digitalocean.com/docs/databases/redis/), + [MySQL](https://www.digitalocean.com/docs/databases/mysql/), and + [MongoDB](https://www.digitalocean.com/docs/databases/mongodb/). + + By sending requests to the `/v2/databases` endpoint, you can list, create, or delete + database clusters as well as scale the size of a cluster, add or remove read-only replicas, + and manage other configuration details. + + Database clusters may be deployed in a multi-node, high-availability configuration. + If your machine type is above the basic nodes, your node plan is above the smallest option, + or you are running MongoDB, you may additionally include up to two standby nodes in your cluster. + + The size of individual nodes in a database cluster is represented by a human-readable slug, + which is used in some of the following requests. Each slug denotes the node's identifier, + CPU count, and amount of RAM, in that order. 
+ + For **Basic nodes**, reference the following table for its slug: + + Slug | CPU | RAM + -------------------|---------|--------- + db-s-1vcpu-1gb | 1 vCPU | 1 GB + db-s-1vcpu-2gb | 1 vCPU | 2 GB + db-s-2vcpu-4gb | 2 vCPU | 4 GB + db-s-4vcpu-8gb | 4 vCPU | 8 GB + db-s-6vcpu-16gb | 6 vCPU | 16 GB + db-s-8vcpu-32gb | 8 vCPU | 32 GB + db-s-16vcpu-64gb | 16 vCPU | 64 GB + + For **General Purpose nodes**, reference the following table for its slug: + + Slug | CPU | RAM + -------------------|---------|--------- + gd-2vcpu-8gb | 2 vCPU | 8 GB + gd-4vcpu-16gb | 4 vCPU | 16 GB + gd-8vcpu-32gb | 8 vCPU | 32 GB + gd-16vcpu-64gb | 16 vCPU | 64 GB + gd-32vcpu-128gb | 32 vCPU | 128 GB + gd-40vcpu-160gb | 40 vCPU | 160 GB + + For **Storage-Optimized nodes**, reference the following table for its slug: + + Slug | CPU | RAM + -------------------|---------|--------- + so1_5-2vcpu-16gb | 2 vCPU | 16 GB + so1_5-4vcpu-32gb | 4 vCPU | 32 GB + so1_5-8vcpu-64gb | 8 vCPU | 64 GB + so1_5-16vcpu-128gb | 16 vCPU | 128 GB + so1_5-24vcpu-192gb | 24 vCPU | 192 GB + so1_5-32vcpu-256gb | 32 vCPU | 256 GB + + For **Memory-Optimized nodes**, reference the following table for its slug: + + Slug | CPU | RAM + -------------------|---------|--------- + m-2vcpu-16gb | 2 vCPU | 16 GB + m-4vcpu-32gb | 4 vCPU | 32 GB + m-8vcpu-64gb | 8 vCPU | 64 GB + m-16vcpu-128gb | 16 vCPU | 128 GB + m-24vcpu-192gb | 24 vCPU | 192 GB + m-32vcpu-256gb | 32 vCPU | 256 GB + + - name: Domain Records + description: |- + Domain record resources are used to set or retrieve information about the + individual DNS records configured for a domain. This allows you to build + and manage DNS zone files by adding and modifying individual records for a + domain. 
+ + The [DigitalOcean DNS management interface](https://www.digitalocean.com/docs/networking/dns/) + allows you to configure the following DNS records: + + Name | Description | + ------|----------------------------------------------------------------------------------------------------------------------------------------------------| + A | This record type is used to map an IPv4 address to a hostname. | + AAAA | This record type is used to map an IPv6 address to a hostname. | + CAA | As specified in RFC-6844, this record type can be used to restrict which certificate authorities are permitted to issue certificates for a domain. | + CNAME | This record type defines an alias for your canonical hostname (the one defined by an A or AAAA record). | + MX | This record type is used to define the mail exchanges used for the domain. | + NS | This record type defines the name servers that are used for this zone. | + TXT | This record type is used to associate a string of text with a hostname, primarily used for verification. | + SRV | This record type specifies the location (hostname and port number) of servers for specific services. | + SOA | This record type defines administrative information about the zone. Can only have ttl changed, cannot be deleted | + + - name: Domains + description: |- + Domain resources are domain names that you have purchased from a domain + name registrar that you are managing through the + [DigitalOcean DNS interface](https://www.digitalocean.com/docs/networking/dns/). + + This resource establishes top-level control over each domain. Actions that + affect individual domain records should be taken on the + [Domain Records](#tag/Domain-Records) resource. + + - name: Droplet Actions + description: |- + Droplet actions are tasks that can be executed on a Droplet. These can be + things like rebooting, resizing, snapshotting, etc. + + Droplet action requests are generally targeted at one of the "actions" + endpoints for a specific Droplet. 
The specific actions are usually + initiated by sending a POST request with the action and arguments as + parameters. + + Droplet action requests create a Droplet actions object, which can be used + to get information about the status of an action. Creating a Droplet + action is asynchronous: the HTTP call will return the action object before + the action has finished processing on the Droplet. The current status of + an action can be retrieved from either the Droplet actions endpoint or the + global actions endpoint. If a Droplet action is uncompleted it may block + the creation of a subsequent action for that Droplet, the locked attribute + of the Droplet will be true and attempts to create a Droplet action will + fail with a status of 422. + + - name: Droplets + description: |- + A [Droplet](https://www.digitalocean.com/docs/droplets/) is a DigitalOcean + virtual machine. By sending requests to the Droplet endpoint, you can + list, create, or delete Droplets. + + Some of the attributes will have an object value. The `region` and `image` + objects will all contain the standard attributes of their associated + types. Find more information about each of these objects in their + respective sections. + + - name: Firewalls + description: |- + [DigitalOcean Cloud Firewalls](https://www.digitalocean.com/docs/networking/firewalls/) + provide the ability to restrict network access to and from a Droplet + allowing you to define which ports will accept inbound or outbound + connections. By sending requests to the `/v2/firewalls` endpoint, you can + list, create, or delete firewalls as well as modify access rules. + + - name: Floating IP Actions + description: |- + As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs). + The Reserved IP product's endpoints function the exact same way as Floating IPs. + The only difference is the name change throughout the URLs and fields. 
+ For example, the `floating_ips` field is now the `reserved_ips` field. + The Floating IP endpoints will remain active until fall 2023 before being + permanently deprecated. + + With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects), + we will reflect this change as an additional field in the responses across the API + where the `floating_ip` field is used. For example, the Droplet metadata response + will contain the field `reserved_ips` in addition to the `floating_ips` field. + Floating IPs retrieved using the Projects API will retain the original name. + + Floating IP actions are commands that can be given to a DigitalOcean + floating IP. These requests are made on the actions endpoint of a specific + floating IP. + + An action object is returned. These objects hold the current status of the + requested action. + + - name: Floating IPs + description: |- + As of 16 June 2022, we have renamed the Floating IP product to [Reserved IPs](https://docs.digitalocean.com/reference/api/api-reference/#tag/Reserved-IPs). + The Reserved IP product's endpoints function the exact same way as Floating IPs. + The only difference is the name change throughout the URLs and fields. + For example, the `floating_ips` field is now the `reserved_ips` field. + The Floating IP endpoints will remain active until fall 2023 before being + permanently deprecated. + + With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects), + we will reflect this change as an additional field in the responses across the API + where the `floating_ip` field is used. For example, the Droplet metadata response + will contain the field `reserved_ips` in addition to the `floating_ips` field. + Floating IPs retrieved using the Projects API will retain the original name. 
+ + [DigitalOcean Floating IPs](https://www.digitalocean.com/docs/networking/floating-ips/) + are publicly-accessible static IP addresses that can be mapped to one of + your Droplets. They can be used to create highly available setups or other + configurations requiring movable addresses. + + Floating IPs are bound to a specific region. + + - name: Functions + description: |- + [Serverless functions](https://docs.digitalocean.com/products/functions) are blocks of code that run on demand without the need to manage any infrastructure. + You can develop functions on your local machine and then deploy them to a namespace using `doctl`, the [official DigitalOcean CLI tool](https://docs.digitalocean.com/reference/doctl). + + The Serverless Functions API currently only supports creating and managing namespaces. + + - name: Image Actions + description: |- + Image actions are commands that can be given to a DigitalOcean image. In + general, these requests are made on the actions endpoint of a specific + image. + + An image action object is returned. These objects hold the current status + of the requested action. + + - name: Images + description: |- + A DigitalOcean [image](https://www.digitalocean.com/docs/images/) can be + used to create a Droplet and may come in a number of flavors. Currently, + there are five types of images: snapshots, backups, applications, + distributions, and custom images. + + * [Snapshots](https://www.digitalocean.com/docs/images/snapshots/) provide + a full copy of an existing Droplet instance taken on demand. + + * [Backups](https://www.digitalocean.com/docs/images/backups/) are similar + to snapshots but are created automatically at regular intervals when + enabled for a Droplet. + + * [Custom images](https://www.digitalocean.com/docs/images/custom-images/) + are Linux-based virtual machine images (raw, qcow2, vhdx, vdi, and vmdk + formats are supported) that you may upload for use on DigitalOcean. 
+ + * Distributions are the public Linux distributions that are available to + be used as a base to create Droplets. + + * Applications, or [1-Click Apps](https://www.digitalocean.com/docs/one-clicks/), + are distributions pre-configured with additional software. + + To interact with images, you will generally send requests to the images + endpoint at /v2/images. + + - name: Kubernetes + description: |- + [DigitalOcean Kubernetes](https://www.digitalocean.com/docs/kubernetes/) + allows you to quickly deploy scalable and secure Kubernetes clusters. By + sending requests to the `/v2/kubernetes/clusters` endpoint, you can list, + create, or delete clusters as well as scale node pools up and down, + recycle individual nodes, and retrieve the kubeconfig file for use with + a cluster. + + - name: Load Balancers + description: |- + [DigitalOcean Load Balancers](https://www.digitalocean.com/docs/networking/load-balancers/) + provide a way to distribute traffic across multiple Droplets. By sending + requests to the `/v2/load_balancers` endpoint, you can list, create, or + delete load balancers as well as add or remove Droplets, forwarding rules, + and other configuration details. + + - name: Monitoring + description: |- + The DigitalOcean Monitoring API makes it possible to programmatically retrieve metrics as well as configure alert + policies based on these metrics. The Monitoring API can help you gain insight into how your apps are performing + and consuming resources. + + - name: Project Resources + description: |- + Project Resources are resources that can be grouped into your projects. + You can group resources (like Droplets, Spaces, load balancers, domains, + and floating IPs) in ways that align with the applications you host on + DigitalOcean. + + ### Supported Resource Types Examples + + Projects resources are identified by uniform resource names or URNs. A + valid URN has the following format: `do:resource_type:resource_id`. 
The + following resource types are supported: + + Resource Type | Example URN + -------------------|------------ + App Platform App | `do:app:be5aab85-851b-4cab-b2ed-98d5a63ba4e8` + Database | `do:dbaas:83c7a55f-0d84-4760-9245-aba076ec2fb2` + Domain | `do:domain:example.com` + Droplet | `do:droplet:4126873` + Floating IP | `do:floatingip:192.168.99.100` + Kubernetes Cluster | `do:kubernetes:bd5f5959-5e1e-4205-a714-a914373942af` + Load Balancer | `do:loadbalancer:39052d89-8dd4-4d49-8d5a-3c3b6b365b5b` + Space | `do:space:my-website-assets` + Volume | `do:volume:6fc4c277-ea5c-448a-93cd-dd496cfef71f` + + ### Resource Status Codes + + When assigning and retrieving resources in projects, a `status` attribute + is returned that indicates if a resource was successfully retrieved or + assigned. The status codes can be one of the following: + + Status Code | Explanation + -------------------|------------ + `ok` | There was no problem retrieving or assigning a resource. + `not_found` | The resource was not found. + `assigned` | The resource was successfully assigned. + `already_assigned` | The resource was already assigned. + `service_down` | There was a problem retrieving or assigning a resource. Please try again. + + - name: Projects + description: |- + Projects allow you to organize your resources into groups that fit the way + you work. You can group resources (like Droplets, Spaces, load balancers, + domains, and floating IPs) in ways that align with the applications + you host on DigitalOcean. + + - name: Regions + description: Provides information about DigitalOcean data center regions. + + - name: Reserved IP Actions + description: |- + As of 16 June 2022, we have renamed the [Floating IP](https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs) + product to Reserved IPs. The Reserved IP product's endpoints function the exact + same way as Floating IPs. The only difference is the name change throughout the + URLs and fields. 
For example, the `floating_ips` field is now the `reserved_ips` field. + The Floating IP endpoints will remain active until fall 2023 before being + permanently deprecated. + + With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects), + we will reflect this change as an additional field in the responses across the API + where the `floating_ip` field is used. For example, the Droplet metadata response + will contain the field `reserved_ips` in addition to the `floating_ips` field. + Floating IPs retrieved using the Projects API will retain the original name. + + Reserved IP actions are commands that can be given to a DigitalOcean + reserved IP. These requests are made on the actions endpoint of a specific + reserved IP. + + An action object is returned. These objects hold the current status of the + requested action. + + - name: Reserved IPs + description: |- + As of 16 June 2022, we have renamed the [Floating IP](https://docs.digitalocean.com/reference/api/api-reference/#tag/Floating-IPs) + product to Reserved IPs. The Reserved IP product's endpoints function the exact + same way as Floating IPs. The only difference is the name change throughout the + URLs and fields. For example, the `floating_ips` field is now the `reserved_ips` field. + The Floating IP endpoints will remain active until fall 2023 before being + permanently deprecated. + + With the exception of the [Projects API](https://docs.digitalocean.com/reference/api/api-reference/#tag/Projects), + we will reflect this change as an additional field in the responses across the API + where the `floating_ip` field is used. For example, the Droplet metadata response + will contain the field `reserved_ips` in addition to the `floating_ips` field. + Floating IPs retrieved using the Projects API will retain the original name. + + DigitalOcean Reserved IPs are publicly-accessible static IP addresses that can be + mapped to one of your Droplets. 
They can be used to create highly available + setups or other configurations requiring movable addresses. + + Reserved IPs are bound to a specific region. + + - name: Sizes + description: |- + The sizes objects represent different packages of hardware resources that + can be used for Droplets. When a Droplet is created, a size must be + selected so that the correct resources can be allocated. + + Each size represents a plan that bundles together specific sets of + resources. This includes the amount of RAM, the number of virtual CPUs, + disk space, and transfer. The size object also includes the pricing + details and the regions that the size is available in. + + - name: Snapshots + description: |- + [Snapshots](https://www.digitalocean.com/docs/images/snapshots/) are saved + instances of a Droplet or a block storage volume, which is reflected in + the `resource_type` attribute. In order to avoid problems with compressing + filesystems, each defines a `min_disk_size` attribute which is the minimum + size of the Droplet or volume disk when creating a new resource from the + saved snapshot. + + To interact with snapshots, you will generally send requests to the + snapshots endpoint at `/v2/snapshots`. + + - name: SSH Keys + description: Manage SSH keys available on your account. + + - name: Tags + description: |- + A tag is a label that can be applied to a resource (currently Droplets, + Images, Volumes, Volume Snapshots, and Database clusters) in order to + better organize or facilitate the lookups and actions on it. + + Tags have two attributes: a user defined `name` attribute and an embedded + `resources` attribute with information about resources that have been tagged. + + + - name: Uptime + description: >- + [DigitalOcean Uptime Checks](https://docs.digitalocean.com/products/uptime/) provide the ability to monitor your endpoints from around the world, and alert you when they're slow, unavailable, or SSL certificates are expiring. 
+ + To interact with Uptime, you will generally send requests to the Uptime endpoint at `/v2/uptime/`. + + - name: VPCs + description: |- + [VPCs (virtual private clouds)](https://www.digitalocean.com/docs/networking/vpc/) + allow you to create virtual networks containing resources that can + communicate with each other in full isolation using private IP addresses. + + By sending requests to the `/v2/vpcs` endpoint, you can create, configure, + list, and delete custom VPCs as well as retrieve information about the + resources assigned to them. + +paths: + /v2/1-clicks: + get: + $ref: 'resources/1-clicks/oneClicks_list.yml' + + /v2/1-clicks/kubernetes: + post: + $ref: 'resources/1-clicks/oneClicks_install_kubernetes.yml' + + /v2/account: + get: + $ref: 'resources/account/account_get.yml' + + /v2/account/keys: + get: + $ref: 'resources/ssh_keys/sshKeys_list.yml' + + post: + $ref: 'resources/ssh_keys/sshKeys_create.yml' + + /v2/account/keys/{ssh_key_identifier}: + get: + $ref: 'resources/ssh_keys/sshKeys_get.yml' + + put: + $ref: 'resources/ssh_keys/sshKeys_update.yml' + + delete: + $ref: 'resources/ssh_keys/sshKeys_delete.yml' + + /v2/actions: + get: + $ref: 'resources/actions/actions_list.yml' + + /v2/actions/{action_id}: + get: + $ref: 'resources/actions/actions_get.yml' + + /v2/apps: + get: + $ref: 'resources/apps/apps_list.yml' + post: + $ref: 'resources/apps/apps_create.yml' + + /v2/apps/{id}: + delete: + $ref: 'resources/apps/apps_delete.yml' + get: + $ref: 'resources/apps/apps_get.yml' + put: + $ref: 'resources/apps/apps_update.yml' + + /v2/apps/{app_id}/components/{component_name}/logs: + get: + $ref: 'resources/apps/apps_get_logs_active_deployment.yml' + + /v2/apps/{app_id}/deployments: + get: + $ref: 'resources/apps/apps_list_deployments.yml' + post: + $ref: 'resources/apps/apps_create_deployment.yml' + + /v2/apps/{app_id}/deployments/{deployment_id}: + get: + $ref: 'resources/apps/apps_get_deployment.yml' + + 
/v2/apps/{app_id}/deployments/{deployment_id}/cancel: + post: + $ref: 'resources/apps/apps_cancel_deployment.yml' + + /v2/apps/{app_id}/deployments/{deployment_id}/components/{component_name}/logs: + get: + $ref: 'resources/apps/apps_get_logs.yml' + + /v2/apps/{app_id}/deployments/{deployment_id}/logs: + get: + $ref: 'resources/apps/apps_get_logs_aggregate.yml' + + /v2/apps/{app_id}/logs: + get: + $ref: 'resources/apps/apps_get_logs_active_deployment_aggregate.yml' + + /v2/apps/tiers: + get: + $ref: 'resources/apps/apps_list_tiers.yml' + + /v2/apps/tiers/{slug}: + get: + $ref: 'resources/apps/apps_get_tier.yml' + + /v2/apps/tiers/instance_sizes: + get: + $ref: 'resources/apps/apps_list_instanceSizes.yml' + + /v2/apps/tiers/instance_sizes/{slug}: + get: + $ref: 'resources/apps/apps_get_instanceSize.yml' + + /v2/apps/regions: + get: + $ref: 'resources/apps/apps_list_regions.yml' + + /v2/apps/propose: + post: + $ref: 'resources/apps/apps_validate_appSpec.yml' + + /v2/apps/{app_id}/alerts: + get: + $ref: 'resources/apps/apps_list_alerts.yml' + + /v2/apps/{app_id}/alerts/{alert_id}/destinations: + post: + $ref: 'resources/apps/apps_assign_alertDestinations.yml' + + /v2/apps/{app_id}/rollback: + post: + $ref: 'resources/apps/apps_create_rollback.yml' + + /v2/apps/{app_id}/rollback/validate: + post: + $ref: 'resources/apps/apps_validate_rollback.yml' + + /v2/apps/{app_id}/rollback/commit: + post: + $ref: 'resources/apps/apps_commit_rollback.yml' + + /v2/apps/{app_id}/rollback/revert: + post: + $ref: 'resources/apps/apps_revert_rollback.yml' + + /v2/apps/{app_id}/metrics/bandwidth_daily: + get: + $ref: 'resources/apps/apps_get_metrics_bandwidth_usage.yml' + + /v2/apps/metrics/bandwidth_daily: + post: + $ref: 'resources/apps/apps_list_metrics_bandwidth_usage.yml' + + /v2/cdn/endpoints: + get: + $ref: 'resources/cdn/cdn_list_endpoints.yml' + + post: + $ref: 'resources/cdn/cdn_create_endpoint.yml' + + /v2/cdn/endpoints/{cdn_id}: + get: + $ref: 
'resources/cdn/cdn_get_endpoint.yml' + + put: + $ref: 'resources/cdn/cdn_update_endpoint.yml' + + delete: + $ref: 'resources/cdn/cdn_delete_endpoint.yml' + + /v2/cdn/endpoints/{cdn_id}/cache: + delete: + $ref: 'resources/cdn/cdn_purge_cache.yml' + + /v2/certificates: + get: + $ref: 'resources/certificates/certificates_list.yml' + + post: + $ref: 'resources/certificates/certificates_create.yml' + + /v2/certificates/{certificate_id}: + get: + $ref: 'resources/certificates/certificates_get.yml' + + delete: + $ref: 'resources/certificates/certificates_delete.yml' + + /v2/customers/my/balance: + get: + $ref: 'resources/billing/balance_get.yml' + + /v2/customers/my/billing_history: + get: + $ref: 'resources/billing/billingHistory_list.yml' + + /v2/customers/my/invoices: + get: + $ref: 'resources/billing/invoices_list.yml' + + /v2/customers/my/invoices/{invoice_uuid}: + get: + $ref: 'resources/billing/invoices_get_byUUID.yml' + + /v2/customers/my/invoices/{invoice_uuid}/csv: + get: + $ref: 'resources/billing/invoices_get_csvByUUID.yml' + + /v2/customers/my/invoices/{invoice_uuid}/pdf: + get: + $ref: 'resources/billing/invoices_get_pdfByUUID.yml' + + /v2/customers/my/invoices/{invoice_uuid}/summary: + get: + $ref: 'resources/billing/invoices_get_summaryByUUID.yml' + + /v2/databases/options: + get: + $ref: 'resources/databases/databases_list_options.yml' + + /v2/databases: + get: + $ref: 'resources/databases/databases_list_clusters.yml' + post: + $ref: 'resources/databases/databases_create_cluster.yml' + + /v2/databases/{database_cluster_uuid}: + get: + $ref: 'resources/databases/databases_get_cluster.yml' + delete: + $ref: 'resources/databases/databases_destroy_cluster.yml' + + /v2/databases/{database_cluster_uuid}/config: + get: + $ref: 'resources/databases/databases_get_config.yml' + patch: + $ref: 'resources/databases/databases_patch_config.yml' + + /v2/databases/{database_cluster_uuid}/ca: + get: + $ref: 'resources/databases/databases_get_ca.yml' + + 
/v2/databases/{database_cluster_uuid}/online-migration: + get: + $ref: 'resources/databases/databases_get_migrationStatus.yml' + put: + $ref: 'resources/databases/databases_update_onlineMigration.yml' + + /v2/databases/{database_cluster_uuid}/online-migration/{migration_id}: + delete: + $ref: 'resources/databases/databases_delete_onlineMigration.yml' + + /v2/databases/{database_cluster_uuid}/migrate: + put: + $ref: 'resources/databases/databases_update_region.yml' + + /v2/databases/{database_cluster_uuid}/resize: + put: + $ref: 'resources/databases/databases_update_clusterSize.yml' + + /v2/databases/{database_cluster_uuid}/firewall: + get: + $ref: 'resources/databases/databases_list_firewall_rules.yml' + put: + $ref: 'resources/databases/databases_update_firewall_rules.yml' + + /v2/databases/{database_cluster_uuid}/maintenance: + put: + $ref: 'resources/databases/databases_update_maintenanceWindow.yml' + + /v2/databases/{database_cluster_uuid}/backups: + get: + $ref: 'resources/databases/databases_list_backups.yml' + + /v2/databases/{database_cluster_uuid}/replicas: + get: + $ref: 'resources/databases/databases_list_replicas.yml' + post: + $ref: 'resources/databases/databases_create_replica.yml' + + /v2/databases/{database_cluster_uuid}/replicas/{replica_name}: + get: + $ref: 'resources/databases/databases_get_replica.yml' + delete: + $ref: 'resources/databases/databases_destroy_replica.yml' + + /v2/databases/{database_cluster_uuid}/replicas/{replica_name}/promote: + put: + $ref: 'resources/databases/databases_promote_replica.yml' + + /v2/databases/{database_cluster_uuid}/users: + get: + $ref: 'resources/databases/databases_list_users.yml' + post: + $ref: 'resources/databases/databases_add_user.yml' + + /v2/databases/{database_cluster_uuid}/users/{username}: + get: + $ref: 'resources/databases/databases_get_user.yml' + delete: + $ref: 'resources/databases/databases_delete_user.yml' + + /v2/databases/{database_cluster_uuid}/users/{username}/reset_auth: + post: + 
$ref: 'resources/databases/databases_reset_auth.yml' + + /v2/databases/{database_cluster_uuid}/dbs: + get: + $ref: 'resources/databases/databases_list.yml' + post: + $ref: 'resources/databases/databases_add.yml' + + /v2/databases/{database_cluster_uuid}/dbs/{database_name}: + get: + $ref: 'resources/databases/databases_get.yml' + delete: + $ref: 'resources/databases/databases_delete.yml' + + /v2/databases/{database_cluster_uuid}/pools: + get: + $ref: 'resources/databases/databases_list_connectionPools.yml' + post: + $ref: 'resources/databases/databases_add_connectionPool.yml' + + /v2/databases/{database_cluster_uuid}/pools/{pool_name}: + get: + $ref: 'resources/databases/databases_get_connectionPool.yml' + put: + $ref: 'resources/databases/databases_update_connectionPool.yml' + delete: + $ref: 'resources/databases/databases_delete_connectionPool.yml' + + /v2/databases/{database_cluster_uuid}/eviction_policy: + get: + $ref: 'resources/databases/databases_get_evictionPolicy.yml' + put: + $ref: 'resources/databases/databases_update_evictionPolicy.yml' + + /v2/databases/{database_cluster_uuid}/sql_mode: + get: + $ref: 'resources/databases/databases_get_sql_mode.yml' + put: + $ref: 'resources/databases/databases_update_sql_mode.yml' + + /v2/databases/{database_cluster_uuid}/upgrade: + put: + $ref: 'resources/databases/databases_upgrade_major_version.yml' + + /v2/domains: + get: + $ref: 'resources/domains/domains_list.yml' + post: + $ref: 'resources/domains/domains_create.yml' + + /v2/domains/{domain_name}: + get: + $ref: 'resources/domains/domains_get.yml' + delete: + $ref: 'resources/domains/domains_delete.yml' + + /v2/domains/{domain_name}/records: + get: + $ref: 'resources/domains/domains_list_records.yml' + post: + $ref: 'resources/domains/domains_create_record.yml' + + /v2/domains/{domain_name}/records/{domain_record_id}: + get: + $ref: 'resources/domains/domains_get_record.yml' + patch: + $ref: 'resources/domains/domains_patch_record.yml' + put: + $ref: 
'resources/domains/domains_update_record.yml' + delete: + $ref: 'resources/domains/domains_delete_record.yml' + + /v2/droplets: + get: + $ref: 'resources/droplets/droplets_list.yml' + + post: + $ref: 'resources/droplets/droplets_create.yml' + + delete: + $ref: 'resources/droplets/droplets_destroy_byTag.yml' + + /v2/droplets/{droplet_id}: + get: + $ref: 'resources/droplets/droplets_get.yml' + + delete: + $ref: 'resources/droplets/droplets_destroy.yml' + + /v2/droplets/{droplet_id}/backups: + get: + $ref: 'resources/droplets/droplets_list_backups.yml' + + /v2/droplets/{droplet_id}/snapshots: + get: + $ref: 'resources/droplets/droplets_list_snapshots.yml' + + /v2/droplets/{droplet_id}/actions: + get: + $ref: 'resources/droplets/dropletActions_list.yml' + post: + $ref: 'resources/droplets/dropletActions_post.yml' + + /v2/droplets/actions: + post: + $ref: 'resources/droplets/dropletActions_post_byTag.yml' + + /v2/droplets/{droplet_id}/actions/{action_id}: + get: + $ref: 'resources/droplets/dropletActions_get.yml' + + /v2/droplets/{droplet_id}/kernels: + get: + $ref: 'resources/droplets/droplets_list_kernels.yml' + + /v2/droplets/{droplet_id}/firewalls: + get: + $ref: 'resources/droplets/droplets_list_firewalls.yml' + + /v2/droplets/{droplet_id}/neighbors: + get: + $ref: 'resources/droplets/droplets_list_neighbors.yml' + + /v2/droplets/{droplet_id}/destroy_with_associated_resources: + get: + $ref: 'resources/droplets/droplets_list_associatedResources.yml' + + /v2/droplets/{droplet_id}/destroy_with_associated_resources/selective: + delete: + $ref: 'resources/droplets/droplets_destroy_withAssociatedResourcesSelective.yml' + + /v2/droplets/{droplet_id}/destroy_with_associated_resources/dangerous: + delete: + $ref: 'resources/droplets/droplets_destroy_withAssociatedResourcesDangerous.yml' + + /v2/droplets/{droplet_id}/destroy_with_associated_resources/status: + get: + $ref: 'resources/droplets/droplets_get_destroyAssociatedResourcesStatus.yml' + + 
/v2/droplets/{droplet_id}/destroy_with_associated_resources/retry: + post: + $ref: 'resources/droplets/droplets_destroy_retryWithAssociatedResources.yml' + + /v2/firewalls: + get: + $ref: 'resources/firewalls/firewalls_list.yml' + post: + $ref: 'resources/firewalls/firewalls_create.yml' + + /v2/firewalls/{firewall_id}: + get: + $ref: 'resources/firewalls/firewalls_get.yml' + put: + $ref: 'resources/firewalls/firewalls_update.yml' + delete: + $ref: 'resources/firewalls/firewalls_delete.yml' + + /v2/firewalls/{firewall_id}/droplets: + post: + $ref: 'resources/firewalls/firewalls_assign_droplets.yml' + delete: + $ref: 'resources/firewalls/firewalls_delete_droplets.yml' + + /v2/firewalls/{firewall_id}/tags: + post: + $ref: 'resources/firewalls/firewalls_add_tags.yml' + delete: + $ref: 'resources/firewalls/firewalls_delete_tags.yml' + + /v2/firewalls/{firewall_id}/rules: + post: + $ref: 'resources/firewalls/firewalls_add_rules.yml' + delete: + $ref: 'resources/firewalls/firewalls_delete_rules.yml' + + /v2/floating_ips: + get: + $ref: 'resources/floating_ips/floatingIPs_list.yml' + + post: + $ref: 'resources/floating_ips/floatingIPs_create.yml' + + /v2/floating_ips/{floating_ip}: + get: + $ref: 'resources/floating_ips/floatingIPs_get.yml' + + delete: + $ref: 'resources/floating_ips/floatingIPs_delete.yml' + + /v2/floating_ips/{floating_ip}/actions: + get: + $ref: 'resources/floating_ips/floatingIPsAction_list.yml' + + post: + $ref: 'resources/floating_ips/floatingIPsAction_post.yml' + + /v2/floating_ips/{floating_ip}/actions/{action_id}: + get: + $ref: 'resources/floating_ips/floatingIPsAction_get.yml' + + /v2/functions/namespaces: + get: + $ref: 'resources/functions/functions_list_namespaces.yml' + + post: + $ref: 'resources/functions/functions_create_namespace.yml' + + /v2/functions/namespaces/{namespace_id}: + get: + $ref: 'resources/functions/functions_get_namespace.yml' + + delete: + $ref: 'resources/functions/functions_delete_namespace.yml' + + 
/v2/functions/namespaces/{namespace_id}/triggers: + get: + $ref: 'resources/functions/functions_list_triggers.yml' + + post: + $ref: 'resources/functions/functions_create_trigger.yml' + + /v2/functions/namespaces/{namespace_id}/triggers/{trigger_name}: + get: + $ref: 'resources/functions/functions_get_trigger.yml' + + put: + $ref: 'resources/functions/functions_update_trigger.yml' + + delete: + $ref: 'resources/functions/functions_delete_trigger.yml' + + /v2/images: + get: + $ref: 'resources/images/images_list.yml' + post: + $ref: 'resources/images/images_create_custom.yml' + + /v2/images/{image_id}: + get: + $ref: 'resources/images/images_get.yml' + put: + $ref: 'resources/images/images_update.yml' + delete: + $ref: 'resources/images/images_delete.yml' + + /v2/images/{image_id}/actions: + get: + $ref: 'resources/images/imageActions_list.yml' + post: + $ref: 'resources/images/imageActions_post.yml' + + /v2/images/{image_id}/actions/{action_id}: + get: + $ref: 'resources/images/imageActions_get.yml' + + /v2/kubernetes/clusters: + get: + $ref: 'resources/kubernetes/kubernetes_list_clusters.yml' + + post: + $ref: 'resources/kubernetes/kubernetes_create_cluster.yml' + + /v2/kubernetes/clusters/{cluster_id}: + get: + $ref: 'resources/kubernetes/kubernetes_get_cluster.yml' + + put: + $ref: 'resources/kubernetes/kubernetes_update_cluster.yml' + + delete: + $ref: 'resources/kubernetes/kubernetes_delete_cluster.yml' + + /v2/kubernetes/clusters/{cluster_id}/destroy_with_associated_resources: + get: + $ref: 'resources/kubernetes/kubernetes_list_associatedResources.yml' + + /v2/kubernetes/clusters/{cluster_id}/destroy_with_associated_resources/selective: + delete: + $ref: 'resources/kubernetes/kubernetes_destroy_associatedResourcesSelective.yml' + + /v2/kubernetes/clusters/{cluster_id}/destroy_with_associated_resources/dangerous: + delete: + $ref: 'resources/kubernetes/kubernetes_destroy_associatedResourcesDangerous.yml' + + /v2/kubernetes/clusters/{cluster_id}/kubeconfig: + 
get: + $ref: 'resources/kubernetes/kubernetes_get_kubeconfig.yml' + + /v2/kubernetes/clusters/{cluster_id}/credentials: + get: + $ref: 'resources/kubernetes/kubernetes_get_credentials.yml' + + /v2/kubernetes/clusters/{cluster_id}/upgrades: + get: + $ref: 'resources/kubernetes/kubernetes_get_availableUpgrades.yml' + + /v2/kubernetes/clusters/{cluster_id}/upgrade: + post: + $ref: 'resources/kubernetes/kubernetes_upgrade_cluster.yml' + + /v2/kubernetes/clusters/{cluster_id}/node_pools: + get: + $ref: 'resources/kubernetes/kubernetes_list_nodePools.yml' + + post: + $ref: 'resources/kubernetes/kubernetes_add_nodePool.yml' + + /v2/kubernetes/clusters/{cluster_id}/node_pools/{node_pool_id}: + get: + $ref: 'resources/kubernetes/kubernetes_get_nodePool.yml' + + put: + $ref: 'resources/kubernetes/kubernetes_update_nodePool.yml' + + delete: + $ref: 'resources/kubernetes/kubernetes_delete_nodePool.yml' + + /v2/kubernetes/clusters/{cluster_id}/node_pools/{node_pool_id}/nodes/{node_id}: + delete: + $ref: 'resources/kubernetes/kubernetes_delete_node.yml' + + /v2/kubernetes/clusters/{cluster_id}/node_pools/{node_pool_id}/recycle: + post: + $ref: 'resources/kubernetes/kubernetes_recycle_nodePool.yml' + + /v2/kubernetes/clusters/{cluster_id}/user: + get: + $ref: 'resources/kubernetes/kubernetes_get_clusterUser.yml' + + /v2/kubernetes/options: + get: + $ref: 'resources/kubernetes/kubernetes_list_options.yml' + + /v2/kubernetes/clusters/{cluster_id}/clusterlint: + post: + $ref: 'resources/kubernetes/kubernetes_run_clusterLint.yml' + + get: + $ref: 'resources/kubernetes/kubernetes_get_clusterLintResults.yml' + + /v2/kubernetes/registry: + post: + $ref: 'resources/kubernetes/kubernetes_add_registry.yml' + + delete: + $ref: 'resources/kubernetes/kubernetes_remove_registry.yml' + + /v2/load_balancers: + post: + $ref: 'resources/load_balancers/loadBalancers_create.yml' + + get: + $ref: 'resources/load_balancers/loadBalancers_list.yml' + + /v2/load_balancers/{lb_id}: + get: + $ref: 
'resources/load_balancers/loadBalancers_get.yml' + + put: + $ref: 'resources/load_balancers/loadBalancers_update.yml' + + delete: + $ref: 'resources/load_balancers/loadBalancers_delete.yml' + + /v2/load_balancers/{lb_id}/droplets: + post: + $ref: 'resources/load_balancers/loadBalancers_add_droplets.yml' + + delete: + $ref: 'resources/load_balancers/loadBalancers_remove_droplets.yml' + + /v2/load_balancers/{lb_id}/forwarding_rules: + post: + $ref: 'resources/load_balancers/loadBalancers_add_forwardingRules.yml' + + delete: + $ref: 'resources/load_balancers/loadBalancers_remove_forwardingRules.yml' + + /v2/monitoring/alerts: + get: + $ref: 'resources/monitoring/monitoring_list_alertPolicy.yml' + + post: + $ref: 'resources/monitoring/monitoring_create_alertPolicy.yml' + + /v2/monitoring/alerts/{alert_uuid}: + get: + $ref: 'resources/monitoring/monitoring_get_alertPolicy.yml' + + put: + $ref: 'resources/monitoring/monitoring_update_alertPolicy.yml' + + delete: + $ref: 'resources/monitoring/monitoring_delete_alertPolicy.yml' + + /v2/monitoring/metrics/droplet/bandwidth: + get: + $ref: 'resources/monitoring/monitoring_get_dropletBandwidthMetrics.yml' + + /v2/monitoring/metrics/droplet/cpu: + get: + $ref: 'resources/monitoring/monitoring_get_DropletCpuMetrics.yml' + + /v2/monitoring/metrics/droplet/filesystem_free: + get: + $ref: 'resources/monitoring/monitoring_get_dropletFilesystemFreeMetrics.yml' + + /v2/monitoring/metrics/droplet/filesystem_size: + get: + $ref: 'resources/monitoring/monitoring_get_dropletFilesystemSizeMetrics.yml' + + /v2/monitoring/metrics/droplet/load_1: + get: + $ref: 'resources/monitoring/monitoring_get_dropletLoad1Metrics.yml' + + /v2/monitoring/metrics/droplet/load_5: + get: + $ref: 'resources/monitoring/monitoring_get_dropletLoad5Metrics.yml' + + /v2/monitoring/metrics/droplet/load_15: + get: + $ref: 'resources/monitoring/monitoring_get_dropletLoad15Metrics.yml' + + /v2/monitoring/metrics/droplet/memory_cached: + get: + $ref: 
'resources/monitoring/monitoring_get_dropletMemoryCachedMetrics.yml' + + /v2/monitoring/metrics/droplet/memory_free: + get: + $ref: 'resources/monitoring/monitoring_get_dropletMemoryFreeMetrics.yml' + + /v2/monitoring/metrics/droplet/memory_total: + get: + $ref: 'resources/monitoring/monitoring_get_dropletMemoryTotalMetrics.yml' + + /v2/monitoring/metrics/droplet/memory_available: + get: + $ref: 'resources/monitoring/monitoring_get_dropletMemoryAvailableMetrics.yml' + + /v2/projects: + get: + $ref: 'resources/projects/projects_list.yml' + + post: + $ref: 'resources/projects/projects_create.yml' + + /v2/projects/default: + get: + $ref: 'resources/projects/projects_get_default.yml' + + put: + $ref: 'resources/projects/projects_update_default.yml' + + patch: + $ref: 'resources/projects/projects_patch_default.yml' + + /v2/projects/{project_id}: + get: + $ref: 'resources/projects/projects_get.yml' + + put: + $ref: 'resources/projects/projects_update.yml' + + patch: + $ref: 'resources/projects/projects_patch.yml' + + delete: + $ref: 'resources/projects/projects_delete.yml' + + /v2/projects/{project_id}/resources: + get: + $ref: 'resources/projects/projects_list_resources.yml' + + post: + $ref: 'resources/projects/projects_assign_resources.yml' + + /v2/projects/default/resources: + get: + $ref: 'resources/projects/projects_list_resources_default.yml' + + post: + $ref: 'resources/projects/projects_assign_resources_default.yml' + + /v2/regions: + get: + $ref: 'resources/regions/regions_list.yml' + + /v2/registry: + get: + $ref: 'resources/registry/registry_get.yml' + + post: + $ref: 'resources/registry/registry_create.yml' + + delete: + $ref: 'resources/registry/registry_delete.yml' + + /v2/registry/subscription: + get: + $ref: 'resources/registry/registry_get_subscription.yml' + + post: + $ref: 'resources/registry/registry_update_subscription.yml' + + /v2/registry/docker-credentials: + get: + $ref: 'resources/registry/registry_get_dockerCredentials.yml' + + 
/v2/registry/validate-name: + post: + $ref: 'resources/registry/registry_validate_name.yml' + + /v2/registry/{registry_name}/repositories: + get: + $ref: 'resources/registry/registry_list_repositories.yml' + + /v2/registry/{registry_name}/repositoriesV2: + get: + $ref: 'resources/registry/registry_list_repositoriesV2.yml' + + /v2/registry/{registry_name}/{repository_name}/tags: + get: + $ref: 'resources/registry/registry_list_repositoryTags.yml' + + /v2/registry/{registry_name}/{repository_name}/tags/{repository_tag}: + delete: + $ref: 'resources/registry/registry_delete_repositoryTag.yml' + + /v2/registry/{registry_name}/{repository_name}/digests: + get: + $ref: 'resources/registry/registry_list_repositoryManifests.yml' + + /v2/registry/{registry_name}/{repository_name}/digests/{manifest_digest}: + delete: + $ref: 'resources/registry/registry_delete_repositoryManifest.yml' + + /v2/registry/{registry_name}/garbage-collection: + post: + $ref: 'resources/registry/registry_run_garbageCollection.yml' + + get: + $ref: 'resources/registry/registry_get_garbageCollection.yml' + + /v2/registry/{registry_name}/garbage-collections: + get: + $ref: 'resources/registry/registry_list_garbageCollections.yml' + + /v2/registry/{registry_name}/garbage-collection/{garbage_collection_uuid}: + put: + $ref: 'resources/registry/registry_update_garbageCollection.yml' + + /v2/registry/options: + get: + $ref: 'resources/registry/registry_get_options.yml' + + /v2/reports/droplet_neighbors_ids: + get: + $ref: 'resources/droplets/droplets_list_neighborsIds.yml' + + /v2/reserved_ips: + get: + $ref: 'resources/reserved_ips/reservedIPs_list.yml' + + post: + $ref: 'resources/reserved_ips/reservedIPs_create.yml' + + /v2/reserved_ips/{reserved_ip}: + get: + $ref: 'resources/reserved_ips/reservedIPs_get.yml' + + delete: + $ref: 'resources/reserved_ips/reservedIPs_delete.yml' + + /v2/reserved_ips/{reserved_ip}/actions: + get: + $ref: 'resources/reserved_ips/reservedIPsActions_list.yml' + + post: + 
$ref: 'resources/reserved_ips/reservedIPsActions_post.yml' + + /v2/reserved_ips/{reserved_ip}/actions/{action_id}: + get: + $ref: 'resources/reserved_ips/reservedIPsActions_get.yml' + + /v2/sizes: + get: + $ref: 'resources/sizes/sizes_list.yml' + + /v2/snapshots: + get: + $ref: 'resources/snapshots/snapshots_list.yml' + + /v2/snapshots/{snapshot_id}: + get: + $ref: 'resources/snapshots/snapshots_get.yml' + + delete: + $ref: 'resources/snapshots/snapshots_delete.yml' + + /v2/tags: + get: + $ref: 'resources/tags/tags_list.yml' + post: + $ref: 'resources/tags/tags_create.yml' + + /v2/tags/{tag_id}: + get: + $ref: 'resources/tags/tags_get.yml' + delete: + $ref: 'resources/tags/tags_delete.yml' + + /v2/tags/{tag_id}/resources: + post: + $ref: 'resources/tags/tags_assign_resources.yml' + delete: + $ref: 'resources/tags/tags_unassign_resources.yml' + + /v2/volumes: + get: + $ref: 'resources/volumes/volumes_list.yml' + post: + $ref: 'resources/volumes/volumes_create.yml' + delete: + $ref: 'resources/volumes/volumes_delete_byName.yml' + + /v2/volumes/actions: + post: + $ref: 'resources/volumes/volumeActions_post.yml' + + /v2/volumes/snapshots/{snapshot_id}: + get: + $ref: 'resources/volumes/volumeSnapshots_get_byId.yml' + delete: + $ref: 'resources/volumes/volumeSnapshots_delete_byId.yml' + + /v2/volumes/{volume_id}: + get: + $ref: 'resources/volumes/volumes_get.yml' + delete: + $ref: 'resources/volumes/volumes_delete.yml' + + /v2/volumes/{volume_id}/actions: + get: + $ref: 'resources/volumes/volumeActions_list.yml' + post: + $ref: 'resources/volumes/volumeActions_post_byId.yml' + + /v2/volumes/{volume_id}/actions/{action_id}: + get: + $ref: 'resources/volumes/volumeActions_get.yml' + + /v2/volumes/{volume_id}/snapshots: + get: + $ref: 'resources/volumes/volumeSnapshots_list.yml' + post: + $ref: 'resources/volumes/volumeSnapshots_create.yml' + + /v2/vpcs: + get: + $ref: 'resources/vpcs/vpcs_list.yml' + + post: + $ref: 'resources/vpcs/vpcs_create.yml' + + /v2/vpcs/{vpc_id}: 
+ get: + $ref: 'resources/vpcs/vpcs_get.yml' + + put: + $ref: 'resources/vpcs/vpcs_update.yml' + + patch: + $ref: 'resources/vpcs/vpcs_patch.yml' + + delete: + $ref: 'resources/vpcs/vpcs_delete.yml' + + /v2/vpcs/{vpc_id}/members: + get: + $ref: 'resources/vpcs/vpcs_list_members.yml' + + + /v2/uptime/checks: + get: + $ref: 'resources/uptime/list_checks.yml' + + post: + $ref: 'resources/uptime/create_check.yml' + + /v2/uptime/checks/{check_id}: + get: + $ref: 'resources/uptime/get_check.yml' + + put: + $ref: 'resources/uptime/update_check.yml' + + delete: + $ref: 'resources/uptime/delete_check.yml' + + /v2/uptime/checks/{check_id}/state: + get: + $ref: 'resources/uptime/get_check_state.yml' + + /v2/uptime/checks/{check_id}/alerts: + get: + $ref: 'resources/uptime/list_alerts.yml' + + post: + $ref: 'resources/uptime/create_alert.yml' + + /v2/uptime/checks/{check_id}/alerts/{alert_id}: + get: + $ref: 'resources/uptime/get_alert.yml' + + put: + $ref: 'resources/uptime/update_alert.yml' + + delete: + $ref: 'resources/uptime/delete_alert.yml' + + +components: + securitySchemes: + bearer_auth: + type: http + scheme: bearer + description: | + ## OAuth Authentication + + In order to interact with the DigitalOcean API, you or your application must + authenticate. + + The DigitalOcean API handles this through OAuth, an open standard for + authorization. OAuth allows you to delegate access to your account in full + or in read-only mode. + + You can generate an OAuth token by visiting the [Apps & API](https://cloud.digitalocean.com/account/api/tokens) + section of the DigitalOcean control panel for your account. + + An OAuth token functions as a complete authentication request. In effect, it + acts as a substitute for a username and password pair. + + Because of this, it is absolutely **essential** that you keep your OAuth + tokens secure. 
In fact, upon generation, the web interface will only display + each token a single time in order to prevent the token from being compromised. + + DigitalOcean access tokens begin with an identifiable prefix in order to + distinguish them from other similar tokens. + + - `dop_v1_` for personal access tokens generated in the control panel + - `doo_v1_` for tokens generated by applications using [the OAuth flow](https://docs.digitalocean.com/reference/api/oauth-api/) + - `dor_v1_` for OAuth refresh tokens + + ### How to Authenticate with OAuth + + In order to make an authenticated request, include a bearer-type + `Authorization` header containing your OAuth token. All requests must be + made over HTTPS. + + ### Authenticate with a Bearer Authorization Header + + ``` + curl -X $HTTP_METHOD -H "Authorization: Bearer $DIGITALOCEAN_TOKEN" "https://api.digitalocean.com/v2/$OBJECT" + ``` + +security: + - bearer_auth: []