mirror of
https://github.com/LukeHagar/libopenapi.git
synced 2025-12-07 12:37:48 +00:00
Added 'bundler' module
resolves a v3 document or a model into a single document. Signed-off-by: quobix <dave@quobix.com>
This commit is contained in:
70
bundler/bundler.go
Normal file
70
bundler/bundler.go
Normal file
@@ -0,0 +1,70 @@
|
||||
// Copyright 2023-2024 Princess Beef Heavy Industries, LLC / Dave Shanley
|
||||
// https://pb33f.io
|
||||
// SPDX-License-Identifier: MIT
|
||||
|
||||
package bundler
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"github.com/pb33f/libopenapi"
|
||||
"github.com/pb33f/libopenapi/datamodel"
|
||||
"github.com/pb33f/libopenapi/datamodel/high/v3"
|
||||
"github.com/pb33f/libopenapi/index"
|
||||
)
|
||||
|
||||
// BundleBytes will take a byte slice of an OpenAPI specification and return a bundled version of it.
|
||||
// This is useful for when you want to take a specification with external references, and you want to bundle it
|
||||
// into a single document.
|
||||
//
|
||||
// This function will 'resolve' all references in the specification and return a single document. The resulting
|
||||
// document will be a valid OpenAPI specification, containing no references.
|
||||
//
|
||||
// Circular references will not be resolved and will be skipped.
|
||||
func BundleBytes(bytes []byte, configuration *datamodel.DocumentConfiguration) ([]byte, error) {
|
||||
doc, err := libopenapi.NewDocumentWithConfiguration(bytes, configuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
v3Doc, errs := doc.BuildV3Model()
|
||||
err = errors.Join(errs...)
|
||||
|
||||
bundledBytes, e := BundleDocument(&v3Doc.Model)
|
||||
return bundledBytes, errors.Join(err, e)
|
||||
}
|
||||
|
||||
// BundleDocument will take a v3.Document and return a bundled version of it.
|
||||
// This is useful for when you want to take a document that has been built
|
||||
// from a specification with external references, and you want to bundle it
|
||||
// into a single document.
|
||||
//
|
||||
// This function will 'resolve' all references in the specification and return a single document. The resulting
|
||||
// document will be a valid OpenAPI specification, containing no references.
|
||||
//
|
||||
// Circular references will not be resolved and will be skipped.
|
||||
func BundleDocument(model *v3.Document) ([]byte, error) {
|
||||
rolodex := model.Rolodex
|
||||
compress := func(idx *index.SpecIndex) {
|
||||
mappedReferences := idx.GetMappedReferences()
|
||||
sequencedReferences := idx.GetRawReferencesSequenced()
|
||||
for _, sequenced := range sequencedReferences {
|
||||
mappedReference := mappedReferences[sequenced.FullDefinition]
|
||||
if mappedReference != nil && !mappedReference.Circular {
|
||||
sequenced.Node.Content = mappedReference.Node.Content
|
||||
}
|
||||
if mappedReference != nil && mappedReference.Circular {
|
||||
if idx.GetLogger() != nil {
|
||||
idx.GetLogger().Warn("[bundler] skipping circular reference",
|
||||
"ref", sequenced.FullDefinition)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
indexes := rolodex.GetIndexes()
|
||||
compress(rolodex.GetRootIndex())
|
||||
for _, idx := range indexes {
|
||||
compress(idx)
|
||||
}
|
||||
return model.Render()
|
||||
}
|
||||
133
bundler/bundler_test.go
Normal file
133
bundler/bundler_test.go
Normal file
@@ -0,0 +1,133 @@
|
||||
// Copyright 2023-2024 Princess Beef Heavy Industries, LLC / Dave Shanley
|
||||
// https://pb33f.io
|
||||
|
||||
package bundler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"github.com/pb33f/libopenapi"
|
||||
"github.com/pb33f/libopenapi/datamodel"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"log"
|
||||
"log/slog"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBundleDocument_DigitalOcean(t *testing.T) {
|
||||
|
||||
// test the mother of all exploded specs.
|
||||
tmp, _ := os.MkdirTemp("", "openapi")
|
||||
cmd := exec.Command("git", "clone", "https://github.com/digitalocean/openapi", tmp)
|
||||
defer os.RemoveAll(filepath.Join(tmp, "openapi"))
|
||||
|
||||
err := cmd.Run()
|
||||
if err != nil {
|
||||
log.Fatalf("cmd.Run() failed with %s\n", err)
|
||||
}
|
||||
|
||||
spec, _ := filepath.Abs(filepath.Join(tmp+"/specification", "DigitalOcean-public.v2.yaml"))
|
||||
digi, _ := os.ReadFile(spec)
|
||||
|
||||
doc, err := libopenapi.NewDocumentWithConfiguration([]byte(digi), &datamodel.DocumentConfiguration{
|
||||
BasePath: tmp + "/specification",
|
||||
ExtractRefsSequentially: true,
|
||||
Logger: slog.New(slog.NewJSONHandler(os.Stdout, &slog.HandlerOptions{
|
||||
Level: slog.LevelWarn,
|
||||
})),
|
||||
})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
v3Doc, errs := doc.BuildV3Model()
|
||||
if len(errs) > 0 {
|
||||
panic(errs)
|
||||
}
|
||||
|
||||
bytes, e := BundleDocument(&v3Doc.Model)
|
||||
|
||||
assert.NoError(t, e)
|
||||
assert.False(t, strings.Contains("$ref", string(bytes)), "should not contain $ref")
|
||||
|
||||
}
|
||||
|
||||
func TestBundleDocument_Circular(t *testing.T) {
|
||||
|
||||
digi, _ := os.ReadFile("../test_specs/circular-tests.yaml")
|
||||
|
||||
var logs []byte
|
||||
byteBuf := bytes.NewBuffer(logs)
|
||||
|
||||
config := &datamodel.DocumentConfiguration{
|
||||
ExtractRefsSequentially: true,
|
||||
Logger: slog.New(slog.NewJSONHandler(byteBuf, &slog.HandlerOptions{
|
||||
Level: slog.LevelWarn,
|
||||
})),
|
||||
}
|
||||
doc, err := libopenapi.NewDocumentWithConfiguration(digi, config)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
v3Doc, errs := doc.BuildV3Model()
|
||||
|
||||
// three circular ref issues.
|
||||
assert.Len(t, errs, 3)
|
||||
|
||||
bytes, e := BundleDocument(&v3Doc.Model)
|
||||
assert.NoError(t, e)
|
||||
assert.Len(t, bytes, 3069)
|
||||
|
||||
logEntries := strings.Split(byteBuf.String(), "\n")
|
||||
|
||||
assert.Len(t, logEntries, 5)
|
||||
for _, entry := range logEntries {
|
||||
items := make(map[string]any)
|
||||
if entry != "" {
|
||||
_ = json.Unmarshal([]byte(entry), &items)
|
||||
assert.Equal(t, "[bundler] skipping circular reference", items["msg"])
|
||||
}
|
||||
}
|
||||
assert.NoError(t, e)
|
||||
}
|
||||
|
||||
func TestBundleBytes(t *testing.T) {
|
||||
|
||||
digi, _ := os.ReadFile("../test_specs/circular-tests.yaml")
|
||||
|
||||
var logs []byte
|
||||
byteBuf := bytes.NewBuffer(logs)
|
||||
|
||||
config := &datamodel.DocumentConfiguration{
|
||||
ExtractRefsSequentially: true,
|
||||
Logger: slog.New(slog.NewJSONHandler(byteBuf, &slog.HandlerOptions{
|
||||
Level: slog.LevelWarn,
|
||||
})),
|
||||
}
|
||||
|
||||
bytes, e := BundleBytes(digi, config)
|
||||
assert.Error(t, e)
|
||||
assert.Len(t, bytes, 3069)
|
||||
|
||||
logEntries := strings.Split(byteBuf.String(), "\n")
|
||||
|
||||
assert.Len(t, logEntries, 5)
|
||||
for _, entry := range logEntries {
|
||||
items := make(map[string]any)
|
||||
if entry != "" {
|
||||
_ = json.Unmarshal([]byte(entry), &items)
|
||||
assert.Equal(t, "[bundler] skipping circular reference", items["msg"])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestBundleBytes_Bad(t *testing.T) {
|
||||
bytes, e := BundleBytes(nil, nil)
|
||||
assert.Error(t, e)
|
||||
assert.Nil(t, bytes)
|
||||
}
|
||||
@@ -101,6 +101,12 @@ type DocumentConfiguration struct {
|
||||
// Logger is a structured logger that will be used for logging errors and warnings. If not set, a default logger
|
||||
// will be used, set to the Error level.
|
||||
Logger *slog.Logger
|
||||
|
||||
// ExtractRefsSequentially will extract all references sequentially, which means the index will look up references
|
||||
// as it finds them, vs looking up everything asynchronously.
|
||||
// This is a more thorough way of building the index, but it's slower. It's required when building a document
|
||||
// to be bundled.
|
||||
ExtractRefsSequentially bool
|
||||
}
|
||||
|
||||
func NewDocumentConfiguration() *DocumentConfiguration {
|
||||
|
||||
@@ -46,8 +46,9 @@ func createDocument(info *datamodel.SpecInfo, config *datamodel.DocumentConfigur
|
||||
idxConfig.BaseURL = config.BaseURL
|
||||
idxConfig.BasePath = config.BasePath
|
||||
idxConfig.Logger = config.Logger
|
||||
extract := config.ExtractRefsSequentially
|
||||
idxConfig.ExtractRefsSequentially = extract
|
||||
rolodex := index.NewRolodex(idxConfig)
|
||||
//<-info.GetJSONParsingChannel() // Need to wait for JSON parsing to complete before we can index.
|
||||
rolodex.SetRootNode(info.RootNode)
|
||||
doc.Rolodex = rolodex
|
||||
|
||||
|
||||
@@ -332,6 +332,7 @@ func (d *document) BuildV3Model() (*DocumentModel[v3high.Document], []error) {
|
||||
}
|
||||
|
||||
highDoc := v3high.NewDocument(lowDoc)
|
||||
highDoc.Rolodex = lowDoc.Index.GetRolodex()
|
||||
|
||||
d.highOpenAPI3Model = &DocumentModel[v3high.Document]{
|
||||
Model: *highDoc,
|
||||
|
||||
@@ -571,7 +571,10 @@ func (index *SpecIndex) ExtractComponentsFromRefs(refs []*Reference) []*Referenc
|
||||
var found []*Reference
|
||||
|
||||
// run this async because when things get recursive, it can take a while
|
||||
c := make(chan bool)
|
||||
var c chan bool
|
||||
if !index.config.ExtractRefsSequentially {
|
||||
c = make(chan bool)
|
||||
}
|
||||
|
||||
locate := func(ref *Reference, refIndex int, sequence []*ReferenceMapped) {
|
||||
located := index.FindComponent(ref.FullDefinition)
|
||||
@@ -604,8 +607,10 @@ func (index *SpecIndex) ExtractComponentsFromRefs(refs []*Reference) []*Referenc
|
||||
index.refErrors = append(index.refErrors, indexError)
|
||||
index.errorLock.Unlock()
|
||||
}
|
||||
if !index.config.ExtractRefsSequentially {
|
||||
c <- true
|
||||
}
|
||||
}
|
||||
|
||||
var refsToCheck []*Reference
|
||||
for _, ref := range refs {
|
||||
@@ -615,15 +620,21 @@ func (index *SpecIndex) ExtractComponentsFromRefs(refs []*Reference) []*Referenc
|
||||
|
||||
for r := range refsToCheck {
|
||||
// expand our index of all mapped refs
|
||||
go locate(refsToCheck[r], r, mappedRefsInSequence)
|
||||
//locate(refsToCheck[r], r, mappedRefsInSequence) // used for sync testing.
|
||||
if !index.config.ExtractRefsSequentially {
|
||||
go locate(refsToCheck[r], r, mappedRefsInSequence) // run async
|
||||
} else {
|
||||
locate(refsToCheck[r], r, mappedRefsInSequence) // run synchronously
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if !index.config.ExtractRefsSequentially {
|
||||
completedRefs := 0
|
||||
for completedRefs < len(refsToCheck) {
|
||||
<-c
|
||||
completedRefs++
|
||||
}
|
||||
}
|
||||
for m := range mappedRefsInSequence {
|
||||
if mappedRefsInSequence[m] != nil {
|
||||
index.allMappedRefsSequenced = append(index.allMappedRefsSequenced, mappedRefsInSequence[m])
|
||||
|
||||
@@ -148,6 +148,12 @@ type SpecIndexConfig struct {
|
||||
// the file is a JSON Schema. To allow JSON Schema files to be included set this to true.
|
||||
SkipDocumentCheck bool
|
||||
|
||||
// ExtractRefsSequentially will extract all references sequentially, which means the index will look up references
|
||||
// as it finds them, vs looking up everything asynchronously.
|
||||
// This is a more thorough way of building the index, but it's slower. It's required when building a document
|
||||
// to be bundled.
|
||||
ExtractRefsSequentially bool
|
||||
|
||||
// private fields
|
||||
uri []string
|
||||
}
|
||||
|
||||
@@ -230,6 +230,12 @@ func (index *SpecIndex) GetMappedReferences() map[string]*Reference {
|
||||
return index.allMappedRefs
|
||||
}
|
||||
|
||||
// GetRawReferencesSequenced returns a slice of every single reference found in the document, extracted raw from the doc
// returned in the exact order they were found in the document.
// NOTE(review): unlike GetMappedReferencesSequenced below, these are raw extractions —
// presumably they may include refs that were never successfully mapped; confirm against the index builder.
func (index *SpecIndex) GetRawReferencesSequenced() []*Reference {
	return index.rawSequencedRefs
}
|
||||
|
||||
// GetMappedReferencesSequenced will return all references that were mapped successfully to nodes, performed in sequence
|
||||
// as they were read in from the document.
|
||||
func (index *SpecIndex) GetMappedReferencesSequenced() []*ReferenceMapped {
|
||||
|
||||
Reference in New Issue
Block a user