From ebd24efca72f2b4fadb3eda66dd23a0d6def5aef Mon Sep 17 00:00:00 2001
From: Niklas Nielsen
Date: Fri, 24 May 2024 19:20:08 -0700
Subject: [PATCH] Initial commit

---
 Makefile | 3 +
 README.md | 354 +
 USAGE.md | 18 +
 .../chatcompletionfunctioncalloption.md | 11 +
 .../components/chatcompletionfunctions.md | 12 +
 .../chatcompletionmessagetoolcall.md | 10 +
 .../chatcompletionmessagetoolcalltype.md | 10 +
 .../chatcompletionnamedtoolchoice.md | 11 +
 .../chatcompletionnamedtoolchoicefunction.md | 8 +
 .../chatcompletionnamedtoolchoicetype.md | 10 +
 .../chatcompletionrequestassistantmessage.md | 12 +
 ...atcompletionrequestassistantmessagerole.md | 10 +
 .../chatcompletionrequestfunctionmessage.md | 12 +
 ...hatcompletionrequestfunctionmessagerole.md | 10 +
 .../chatcompletionrequestmessage.md | 35 +
 ...chatcompletionrequestmessagecontentpart.md | 17 +
 ...ompletionrequestmessagecontentpartimage.md | 9 +
 ...etionrequestmessagecontentpartimagetype.md | 10 +
 ...completionrequestmessagecontentparttext.md | 9 +
 .../chatcompletionrequestsystemmessage.md | 10 +
 .../chatcompletionrequesttoolmessage.md | 10 +
 .../chatcompletionrequesttoolmessagerole.md | 10 +
 .../chatcompletionrequestusermessage.md | 10 +
 .../chatcompletionrequestusermessagerole.md | 10 +
 .../chatcompletionresponsemessage.md | 13 +
 ...atcompletionresponsemessagefunctioncall.md | 13 +
 .../chatcompletionresponsemessagerole.md | 10 +
 .../components/chatcompletionstreamoptions.md | 11 +
 .../components/chatcompletiontokenlogprob.md | 11 +
 docs/models/components/chatcompletiontool.md | 9 +
 .../chatcompletiontoolchoiceoption.md | 26 +
 .../chatcompletiontoolchoiceoption1.md | 13 +
 .../components/chatcompletiontooltype.md | 10 +
 docs/models/components/choices.md | 11 +
 docs/models/components/completion.md | 19 +
 .../components/completiontagsselector.md | 9 +
 docs/models/components/completionusage.md | 12 +
 docs/models/components/content.md | 20 +
 .../components/createchatcompletionrequest.md | 28 +
 ...createchatcompletionrequestfunctioncall.md | 29 +
 .../createchatcompletionrequesttype.md | 11 +
 .../createchatcompletionresponse.md | 16 +
 docs/models/components/detail.md | 12 +
 docs/models/components/feedback.md | 14 +
 docs/models/components/finishreason.md | 18 +
 docs/models/components/function.md | 11 +
 docs/models/components/functioncall.md | 13 +
 docs/models/components/functionobject.md | 10 +
 docs/models/components/httpmetadata.md | 9 +
 docs/models/components/imageurl.md | 9 +
 docs/models/components/jsonschema.md | 9 +
 docs/models/components/jsonvalues.md | 9 +
 docs/models/components/kind.md | 11 +
 docs/models/components/logprobs.md | 10 +
 docs/models/components/model.md | 19 +
 docs/models/components/object.md | 10 +
 docs/models/components/one.md | 12 +
 docs/models/components/responseformat.md | 15 +
 docs/models/components/role.md | 10 +
 docs/models/components/security.md | 8 +
 docs/models/components/session.md | 8 +
 docs/models/components/stacktrace.md | 11 +
 docs/models/components/status.md | 12 +
 docs/models/components/stop.md | 20 +
 docs/models/components/task.md | 13 +
 docs/models/components/toplogprobs.md | 10 +
 docs/models/components/two.md | 26 +
 docs/models/components/type.md | 10 +
 docs/models/internal/globals.md | 8 +
 .../operations/createfeedbacktaskresponse.md | 9 +
 docs/models/operations/createglobals.md | 8 +
 docs/models/operations/createrequest.md | 9 +
 docs/models/operations/createresponse.md | 10 +
 .../operations/createsessionglobals.md | 8 +
 .../operations/createsessionrequest.md | 8 +
 .../operations/createsessionresponse.md | 9 +
 .../operations/createsessionresponsebody.md | 10 +
 .../operations/getfeedbacktaskrequest.md | 8 +
 .../operations/getfeedbacktaskresponse.md | 9 +
 docs/models/operations/getglobals.md | 8 +
 docs/models/operations/getrequest.md | 9 +
 docs/models/operations/getresponse.md | 9 +
 docs/models/operations/jsonvalues.md | 9 +
 .../operations/listfeedbacktasksresponse.md | 9 +
 docs/models/operations/listglobals.md | 8 +
 docs/models/operations/listrequest.md | 9 +
 docs/models/operations/listrequestbody.md | 11 +
 docs/models/operations/listresponse.md | 9 +
 docs/models/operations/listresponsebody.md | 10 +
 docs/models/operations/listungradedglobals.md | 8 +
 docs/models/operations/listungradedrequest.md | 8 +
 .../models/operations/listungradedresponse.md | 9 +
 .../operations/listungradedresponsebody.md | 10 +
 docs/models/operations/one.md | 17 +
 .../operations/requestbodyjsonvalues.md | 9 +
 docs/models/operations/two.md | 15 +
 docs/models/operations/updateglobals.md | 8 +
 docs/models/operations/updaterequest.md | 10 +
 docs/models/operations/updateresponse.md | 9 +
 docs/models/operations/uploadglobals.md | 8 +
 docs/models/operations/uploadrequest.md | 9 +
 docs/models/operations/uploadrequestbody.md | 17 +
 docs/models/operations/uploadresponse.md | 9 +
 docs/sdks/completions/README.md | 170 +
 docs/sdks/feedback/README.md | 144 +
 docs/sdks/feedbacktasks/README.md | 129 +
 docs/sdks/log10/README.md | 9 +
 docs/sdks/sessions/README.md | 49 +
 openai.yaml | 13360 ++++++++++++++++
 openapi.yaml | 475 +
 py.typed | 1 +
 pylintrc | 647 +
 setup.py | 45 +
 src/log10/__init__.py | 4 +
 src/log10/_hooks/__init__.py | 4 +
 src/log10/_hooks/sdkhooks.py | 55 +
 src/log10/_hooks/types.py | 74 +
 src/log10/completions.py | 234 +
 src/log10/feedback.py | 224 +
 src/log10/feedbacktasks.py | 190 +
 src/log10/models/__init__.py | 4 +
 src/log10/models/components/__init__.py | 32 +
 .../chatcompletionfunctioncalloption.py | 16 +
 .../components/chatcompletionfunctions.py | 24 +
 .../chatcompletionmessagetoolcall.py | 37 +
 .../chatcompletionnamedtoolchoice.py | 32 +
 .../chatcompletionrequestassistantmessage.py | 50 +
 .../chatcompletionrequestfunctionmessage.py | 27 +
 .../chatcompletionrequestmessage.py | 11 +
 ...chatcompletionrequestmessagecontentpart.py | 8 +
 ...ompletionrequestmessagecontentpartimage.py | 41 +
 ...completionrequestmessagecontentparttext.py | 23 +
 .../chatcompletionrequestsystemmessage.py | 26 +
 .../chatcompletionrequesttoolmessage.py | 25 +
 .../chatcompletionrequestusermessage.py | 29 +
 .../chatcompletionresponsemessage.py | 48 +
 .../components/chatcompletionstreamoptions.py | 17 +
 .../components/chatcompletiontokenlogprob.py | 35 +
 .../models/components/chatcompletiontool.py | 23 +
 .../chatcompletiontoolchoiceoption.py | 15 +
 src/log10/models/components/completion.py | 68 +
 .../models/components/completionusage.py | 20 +
 .../components/createchatcompletionrequest.py | 167 +
 .../createchatcompletionresponse.py | 83 +
 src/log10/models/components/feedback.py | 34 +
 src/log10/models/components/functionobject.py | 23 +
 src/log10/models/components/httpmetadata.py | 17 +
 src/log10/models/components/security.py | 12 +
 src/log10/models/components/session.py | 16 +
 src/log10/models/components/task.py | 40 +
 src/log10/models/errors/__init__.py | 5 +
 src/log10/models/errors/sdkerror.py | 24 +
 src/log10/models/internal/__init__.py | 5 +
 src/log10/models/internal/globals.py | 11 +
 src/log10/models/operations/__init__.py | 14 +
 src/log10/models/operations/create.py | 35 +
 .../models/operations/createfeedbacktask.py | 18 +
 src/log10/models/operations/createsession.py | 42 +
 src/log10/models/operations/get.py | 34 +
 .../models/operations/getfeedbacktask.py | 26 +
 src/log10/models/operations/list.py | 58 +
 .../models/operations/listfeedbacktasks.py | 18 +
 src/log10/models/operations/listungraded.py | 42 +
 src/log10/models/operations/update.py | 35 +
 src/log10/models/operations/upload.py | 98 +
 src/log10/sdk.py | 97 +
 src/log10/sdkconfiguration.py | 45 +
 src/log10/sessions.py | 83 +
 src/log10/utils/__init__.py | 4 +
 src/log10/utils/retries.py | 119 +
 src/log10/utils/utils.py | 1087 ++
 171 files changed, 20261 insertions(+)
 create mode 100644 Makefile
 create mode 100644 README.md
 create mode 100644 USAGE.md
 create mode 100644 docs/models/components/chatcompletionfunctioncalloption.md
 create mode 100644 docs/models/components/chatcompletionfunctions.md
 create mode 100644 docs/models/components/chatcompletionmessagetoolcall.md
 create mode 100644 docs/models/components/chatcompletionmessagetoolcalltype.md
 create mode 100644 docs/models/components/chatcompletionnamedtoolchoice.md
 create mode 100644 docs/models/components/chatcompletionnamedtoolchoicefunction.md
 create mode 100644 docs/models/components/chatcompletionnamedtoolchoicetype.md
 create mode 100644 docs/models/components/chatcompletionrequestassistantmessage.md
 create mode 100644 docs/models/components/chatcompletionrequestassistantmessagerole.md
 create mode 100644 docs/models/components/chatcompletionrequestfunctionmessage.md
 create mode 100644 docs/models/components/chatcompletionrequestfunctionmessagerole.md
 create mode 100644 docs/models/components/chatcompletionrequestmessage.md
 create mode 100644 docs/models/components/chatcompletionrequestmessagecontentpart.md
 create mode 100644 docs/models/components/chatcompletionrequestmessagecontentpartimage.md
 create mode 100644 docs/models/components/chatcompletionrequestmessagecontentpartimagetype.md
 create mode 100644 docs/models/components/chatcompletionrequestmessagecontentparttext.md
 create mode 100644 docs/models/components/chatcompletionrequestsystemmessage.md
 create mode 100644 docs/models/components/chatcompletionrequesttoolmessage.md
 create mode 100644 docs/models/components/chatcompletionrequesttoolmessagerole.md
 create mode 100644 docs/models/components/chatcompletionrequestusermessage.md
 create mode 100644 docs/models/components/chatcompletionrequestusermessagerole.md
 create mode 100644 docs/models/components/chatcompletionresponsemessage.md
 create mode 100644 docs/models/components/chatcompletionresponsemessagefunctioncall.md
 create mode 100644 docs/models/components/chatcompletionresponsemessagerole.md
 create mode 100644 docs/models/components/chatcompletionstreamoptions.md
 create mode 100644 docs/models/components/chatcompletiontokenlogprob.md
 create mode 100644 docs/models/components/chatcompletiontool.md
 create mode 100644 docs/models/components/chatcompletiontoolchoiceoption.md
 create mode 100644 docs/models/components/chatcompletiontoolchoiceoption1.md
 create mode 100644 docs/models/components/chatcompletiontooltype.md
 create mode 100644 docs/models/components/choices.md
 create mode 100644 docs/models/components/completion.md
 create mode 100644 docs/models/components/completiontagsselector.md
 create mode 100644 docs/models/components/completionusage.md
 create mode 100644 docs/models/components/content.md
 create mode 100644 docs/models/components/createchatcompletionrequest.md
 create mode 100644 docs/models/components/createchatcompletionrequestfunctioncall.md
 create mode 100644 docs/models/components/createchatcompletionrequesttype.md
 create mode 100644 docs/models/components/createchatcompletionresponse.md
 create mode 100644 docs/models/components/detail.md
 create mode 100644 docs/models/components/feedback.md
 create mode 100644 docs/models/components/finishreason.md
 create mode 100644 docs/models/components/function.md
 create mode 100644 docs/models/components/functioncall.md
 create mode 100644 docs/models/components/functionobject.md
 create mode 100644 docs/models/components/httpmetadata.md
 create mode 100644 docs/models/components/imageurl.md
 create mode 100644 docs/models/components/jsonschema.md
 create mode 100644 docs/models/components/jsonvalues.md
 create mode 100644 docs/models/components/kind.md
 create mode 100644 docs/models/components/logprobs.md
 create mode 100644 docs/models/components/model.md
 create mode 100644 docs/models/components/object.md
 create mode 100644 docs/models/components/one.md
 create mode 100644 docs/models/components/responseformat.md
 create mode 100644 docs/models/components/role.md
 create mode 100644 docs/models/components/security.md
 create mode 100644 docs/models/components/session.md
 create mode 100644 docs/models/components/stacktrace.md
 create mode 100644 docs/models/components/status.md
 create mode 100644 docs/models/components/stop.md
 create mode 100644 docs/models/components/task.md
 create mode 100644 docs/models/components/toplogprobs.md
 create mode 100644 docs/models/components/two.md
 create mode 100644 docs/models/components/type.md
 create mode 100644 docs/models/internal/globals.md
 create mode 100644 docs/models/operations/createfeedbacktaskresponse.md
 create mode 100644 docs/models/operations/createglobals.md
 create mode 100644 docs/models/operations/createrequest.md
 create mode 100644 docs/models/operations/createresponse.md
 create mode 100644 docs/models/operations/createsessionglobals.md
 create mode 100644 docs/models/operations/createsessionrequest.md
 create mode 100644 docs/models/operations/createsessionresponse.md
 create mode 100644 docs/models/operations/createsessionresponsebody.md
 create mode 100644 docs/models/operations/getfeedbacktaskrequest.md
 create mode 100644 docs/models/operations/getfeedbacktaskresponse.md
 create mode 100644 docs/models/operations/getglobals.md
 create mode 100644 docs/models/operations/getrequest.md
 create mode 100644 docs/models/operations/getresponse.md
 create mode 100644 docs/models/operations/jsonvalues.md
 create mode 100644 docs/models/operations/listfeedbacktasksresponse.md
 create mode 100644 docs/models/operations/listglobals.md
 create mode 100644 docs/models/operations/listrequest.md
 create mode 100644 docs/models/operations/listrequestbody.md
 create mode 100644 docs/models/operations/listresponse.md
 create mode 100644 docs/models/operations/listresponsebody.md
 create mode 100644 docs/models/operations/listungradedglobals.md
 create mode 100644 docs/models/operations/listungradedrequest.md
 create mode 100644 docs/models/operations/listungradedresponse.md
 create mode 100644 docs/models/operations/listungradedresponsebody.md
 create mode 100644 docs/models/operations/one.md
 create mode 100644 docs/models/operations/requestbodyjsonvalues.md
 create mode 100644 docs/models/operations/two.md
 create mode 100644 docs/models/operations/updateglobals.md
 create mode 100644 docs/models/operations/updaterequest.md
 create mode 100644 docs/models/operations/updateresponse.md
 create mode 100644 docs/models/operations/uploadglobals.md
 create mode 100644 docs/models/operations/uploadrequest.md
 create mode 100644 docs/models/operations/uploadrequestbody.md
 create mode 100644 docs/models/operations/uploadresponse.md
 create mode 100644 docs/sdks/completions/README.md
 create mode 100644 docs/sdks/feedback/README.md
 create mode 100644 docs/sdks/feedbacktasks/README.md
 create mode 100644 docs/sdks/log10/README.md
 create mode 100644 docs/sdks/sessions/README.md
 create mode 100644 openai.yaml
 create mode 100644 openapi.yaml
 create mode 100644 py.typed
 create mode 100644 pylintrc
 create mode 100644 setup.py
 create mode 100644 src/log10/__init__.py
 create mode 100644 src/log10/_hooks/__init__.py
 create mode 100644 src/log10/_hooks/sdkhooks.py
 create mode 100644 src/log10/_hooks/types.py
 create mode 100644 src/log10/completions.py
 create mode 100644 src/log10/feedback.py
 create mode 100644 src/log10/feedbacktasks.py
 create mode 100644 src/log10/models/__init__.py
 create mode 100644 src/log10/models/components/__init__.py
 create mode 100644 src/log10/models/components/chatcompletionfunctioncalloption.py
 create mode 100644 src/log10/models/components/chatcompletionfunctions.py
 create mode 100644 src/log10/models/components/chatcompletionmessagetoolcall.py
 create mode 100644 src/log10/models/components/chatcompletionnamedtoolchoice.py
 create mode 100644 src/log10/models/components/chatcompletionrequestassistantmessage.py
 create mode 100644 src/log10/models/components/chatcompletionrequestfunctionmessage.py
 create mode 100644 src/log10/models/components/chatcompletionrequestmessage.py
 create mode 100644 src/log10/models/components/chatcompletionrequestmessagecontentpart.py
 create mode 100644 src/log10/models/components/chatcompletionrequestmessagecontentpartimage.py
 create mode 100644 src/log10/models/components/chatcompletionrequestmessagecontentparttext.py
 create mode 100644 src/log10/models/components/chatcompletionrequestsystemmessage.py
 create mode 100644 src/log10/models/components/chatcompletionrequesttoolmessage.py
 create mode 100644 src/log10/models/components/chatcompletionrequestusermessage.py
 create mode 100644 src/log10/models/components/chatcompletionresponsemessage.py
 create mode 100644 src/log10/models/components/chatcompletionstreamoptions.py
 create mode 100644 src/log10/models/components/chatcompletiontokenlogprob.py
 create mode 100644 src/log10/models/components/chatcompletiontool.py
 create mode 100644 src/log10/models/components/chatcompletiontoolchoiceoption.py
 create mode 100644 src/log10/models/components/completion.py
 create mode 100644 src/log10/models/components/completionusage.py
 create mode 100644 src/log10/models/components/createchatcompletionrequest.py
 create mode 100644 src/log10/models/components/createchatcompletionresponse.py
 create mode 100644 src/log10/models/components/feedback.py
 create mode 100644 src/log10/models/components/functionobject.py
 create mode 100644 src/log10/models/components/httpmetadata.py
 create mode 100644 src/log10/models/components/security.py
 create mode 100644 src/log10/models/components/session.py
 create mode 100644 src/log10/models/components/task.py
 create mode 100644 src/log10/models/errors/__init__.py
 create mode 100644 src/log10/models/errors/sdkerror.py
 create mode 100644 src/log10/models/internal/__init__.py
 create mode 100644 src/log10/models/internal/globals.py
 create mode 100644 src/log10/models/operations/__init__.py
 create mode 100644 src/log10/models/operations/create.py
 create mode 100644 src/log10/models/operations/createfeedbacktask.py
 create mode 100644 src/log10/models/operations/createsession.py
 create mode 100644 src/log10/models/operations/get.py
 create mode 100644 src/log10/models/operations/getfeedbacktask.py
 create mode 100644 src/log10/models/operations/list.py
 create mode 100644 src/log10/models/operations/listfeedbacktasks.py
 create mode 100644 src/log10/models/operations/listungraded.py
 create mode 100644 src/log10/models/operations/update.py
 create mode 100644 src/log10/models/operations/upload.py
 create mode 100644 src/log10/sdk.py
 create mode 100644 src/log10/sdkconfiguration.py
 create mode 100644 src/log10/sessions.py
 create mode 100644 src/log10/utils/__init__.py
 create mode 100644 src/log10/utils/retries.py
 create mode 100644 src/log10/utils/utils.py

diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..cb5127b
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,3 @@
+download_specs:
+	wget -q https://raw.githubusercontent.com/log10-io/openapi/main/openapi.yaml -O openapi.yaml
+	wget -q https://raw.githubusercontent.com/log10-io/openapi/main/openai.yaml -O openai.yaml
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..f412867
--- /dev/null
+++ b/README.md
@@ -0,0 +1,354 @@
+# log10py
+
+
+## 🏗 **Welcome to your new SDK!** 🏗
+
+It has been generated successfully based on your OpenAPI spec. However, it is not yet ready for production use. Here are some next steps:
+- [ ] 🛠 Make your SDK feel handcrafted by [customizing it](https://www.speakeasyapi.dev/docs/customize-sdks)
+- [ ] ♻️ Refine your SDK quickly by iterating locally with the [Speakeasy CLI](https://github.com/speakeasy-api/speakeasy)
+- [ ] 🎁 Publish your SDK to package managers by [configuring automatic publishing](https://www.speakeasyapi.dev/docs/advanced-setup/publish-sdks)
+- [ ] ✨ When ready to productionize, delete this section from the README
+
+
+## SDK Installation
+
+```bash
+pip install git+.git
+```
+
+
+## SDK Example Usage
+
+### Example
+
+```python
+import log10
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.sessions.create(x_log10_organization='')
+
+if res.object is not None:
+    # handle response
+    pass
+
+```
+
+
+## Available Resources and Operations
+
+### [completions](docs/sdks/completions/README.md)
+
+* [create](docs/sdks/completions/README.md#create) - Create a completion
+* [update](docs/sdks/completions/README.md#update) - Update a completion by ID.
+* [list_ungraded](docs/sdks/completions/README.md#list_ungraded) - List ungraded completions, i.e. completions that have not been associated with feedback but match the task selector.
+
+### [sessions](docs/sdks/sessions/README.md)
+
+* [create](docs/sdks/sessions/README.md#create) - Create a session
+
+### [feedback](docs/sdks/feedback/README.md)
+
+* [get](docs/sdks/feedback/README.md#get) - Fetch feedback by ID.
+* [list](docs/sdks/feedback/README.md#list) - List feedback
+* [upload](docs/sdks/feedback/README.md#upload) - Upload a piece of feedback
+
+### [feedback_tasks](docs/sdks/feedbacktasks/README.md)
+
+* [list](docs/sdks/feedbacktasks/README.md#list) - List feedback tasks.
+* [create](docs/sdks/feedbacktasks/README.md#create) - Create a new task.
+* [get](docs/sdks/feedbacktasks/README.md#get) - Retrieves feedback task `taskId`.
+
+
+## Global Parameters
+
+A single parameter is configured globally: it must be set on the SDK client instance during initialization. The global value is then used as the default on every operation that accepts it, and each such operation also accepts a per-call value that locally overrides the global one.
+
+For example, you can set `X-Log10-Organization` to `''` at SDK initialization and then not pass the same value on calls to operations like `update`. If you do pass it, the per-call value overrides the global setting. See the example code below for a demonstration.
+
+
+### Available Globals
+
+The following global parameter is available; it is required and must be set when you initialize the SDK client.
+
+| Name | Type | Required | Description |
+| ---- | ---- |:--------:| ----------- |
+| x_log10_organization | str | ✔️ | The x_log10_organization parameter. |
+
+
+### Example
+
+```python
+import log10
+from log10.models import components
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.update(completion_id='', completion=components.Completion(
+    organization_id='',
+    request=components.CreateChatCompletionRequest(
+        messages=[
+            components.ChatCompletionRequestFunctionMessage(
+                role=components.ChatCompletionRequestFunctionMessageRole.FUNCTION,
+                content='',
+                name='',
+            ),
+        ],
+        model=components.Two.GPT_4_TURBO,
+        n=1,
+        response_format=components.ResponseFormat(
+            type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+        ),
+        temperature=1,
+        top_p=1,
+        user='user-1234',
+    ),
+), x_log10_organization='')
+
+if res.completion is not None:
+    # handle response
+    pass
+
+```
+
+
+## Error Handling
+
+Handling errors in this SDK should largely match your expectations. All operations return a response object or raise an error. If Error objects are specified in your OpenAPI Spec, the SDK will raise the appropriate Error type.
+
+| Error Object | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4xx-5xx | */* |
+
+### Example
+
+```python
+import log10
+from log10.models import components, errors
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+res = None
+try:
+    res = s.completions.create(completion=components.Completion(
+        organization_id='',
+        request=components.CreateChatCompletionRequest(
+            messages=[
+                components.ChatCompletionRequestAssistantMessage(
+                    role=components.ChatCompletionRequestAssistantMessageRole.ASSISTANT,
+                ),
+            ],
+            model=components.Two.GPT_4_TURBO,
+            n=1,
+            response_format=components.ResponseFormat(
+                type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+            ),
+            temperature=1,
+            top_p=1,
+            user='user-1234',
+        ),
+    ), x_log10_organization='')
+
+except errors.SDKError as e:
+    # handle exception
+    raise e
+
+if res.any is not None:
+    # handle response
+    pass
+
+```
+
+
+## Server Selection
+
+### Select Server by Index
+
+You can override the default server globally by passing a server index to the `server_idx: int` optional parameter when initializing the SDK client instance. The selected server will then be used as the default on the operations that use it. This table lists the indexes associated with the available servers:
+
+| # | Server | Variables |
+| - | ------ | --------- |
+| 0 | `https://log10.io` | None |
+
+#### Example
+
+```python
+import log10
+from log10.models import components
+
+s = log10.Log10(
+    server_idx=0,
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.create(completion=components.Completion(
+    organization_id='',
+    request=components.CreateChatCompletionRequest(
+        messages=[
+            components.ChatCompletionRequestAssistantMessage(
+                role=components.ChatCompletionRequestAssistantMessageRole.ASSISTANT,
+            ),
+        ],
+        model=components.Two.GPT_4_TURBO,
+        n=1,
+        response_format=components.ResponseFormat(
+            type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+        ),
+        temperature=1,
+        top_p=1,
+        user='user-1234',
+    ),
+), x_log10_organization='')
+
+if res.any is not None:
+    # handle response
+    pass
+
+```
+
+
+### Override Server URL Per-Client
+
+The default server can also be overridden globally by passing a URL to the `server_url: str` optional parameter when initializing the SDK client instance. For example:
+```python
+import log10
+from log10.models import components
+
+s = log10.Log10(
+    server_url="https://log10.io",
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.create(completion=components.Completion(
+    organization_id='',
+    request=components.CreateChatCompletionRequest(
+        messages=[
+            components.ChatCompletionRequestAssistantMessage(
+                role=components.ChatCompletionRequestAssistantMessageRole.ASSISTANT,
+            ),
+        ],
+        model=components.Two.GPT_4_TURBO,
+        n=1,
+        response_format=components.ResponseFormat(
+            type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+        ),
+        temperature=1,
+        top_p=1,
+        user='user-1234',
+    ),
+), x_log10_organization='')
+
+if res.any is not None:
+    # handle response
+    pass
+
+```
+
+
+## Custom HTTP Client
+
+The Python SDK makes API calls using the [requests](https://pypi.org/project/requests/) HTTP library. To provide a convenient way to configure timeouts, cookies, proxies, custom headers, and other low-level configuration, you can initialize the SDK client with a custom `requests.Session` object.
+
+For example, you could specify a header for every request that this SDK makes as follows:
+```python
+import log10
+import requests
+
+http_client = requests.Session()
+http_client.headers.update({'x-custom-header': 'someValue'})
+s = log10.Log10(client=http_client)
+```
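+
+The same session hook can carry lower-level policy too. As a sketch (assuming the `urllib3` that ships with `requests`), this retries transient server errors on every call the SDK makes:
+```python
+import log10
+import requests
+from requests.adapters import HTTPAdapter
+from urllib3.util.retry import Retry
+
+# Retry up to 3 times on common transient statuses, with exponential backoff.
+retries = Retry(total=3, backoff_factor=0.5, status_forcelist=[502, 503, 504])
+http_client = requests.Session()
+http_client.mount('https://', HTTPAdapter(max_retries=retries))
+
+s = log10.Log10(client=http_client)
+```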
+
+
+## Authentication
+
+### Per-Client Security Schemes
+
+This SDK supports the following security scheme globally:
+
+| Name | Type | Scheme |
+| ------------- | ------------- | ------------- |
+| `log10_token` | apiKey | API key |
+
+To authenticate with the API, the `log10_token` parameter must be set when initializing the SDK client instance. For example:
+```python
+import log10
+from log10.models import components
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.create(completion=components.Completion(
+    organization_id='',
+    request=components.CreateChatCompletionRequest(
+        messages=[
+            components.ChatCompletionRequestAssistantMessage(
+                role=components.ChatCompletionRequestAssistantMessageRole.ASSISTANT,
+            ),
+        ],
+        model=components.Two.GPT_4_TURBO,
+        n=1,
+        response_format=components.ResponseFormat(
+            type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+        ),
+        temperature=1,
+        top_p=1,
+        user='user-1234',
+    ),
+), x_log10_organization='')
+
+if res.any is not None:
+    # handle response
+    pass
+
+```
+
+
+
+# Development
+
+## Maturity
+
+This SDK is in beta, and there may be breaking changes between versions without a major version update. Therefore, we recommend pinning usage to a specific package version. This way, you can install the same version each time without breaking changes unless you are intentionally looking for the latest version.
+
+## Contributions
+
+While we value open-source contributions to this SDK, this library is generated programmatically.
+Feel free to open a PR or a GitHub issue as a proof of concept and we'll do our best to include it in a future release!
+
+### SDK Created by [Speakeasy](https://docs.speakeasyapi.dev/docs/using-speakeasy/client-sdks)
diff --git a/USAGE.md b/USAGE.md
new file mode 100644
index 0000000..66e449a
--- /dev/null
+++ b/USAGE.md
@@ -0,0 +1,18 @@
+
+```python
+import log10
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.sessions.create(x_log10_organization='')
+
+if res.object is not None:
+    # handle response
+    pass
+
+```
+
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionfunctioncalloption.md b/docs/models/components/chatcompletionfunctioncalloption.md
new file mode 100644
index 0000000..1d62bc9
--- /dev/null
+++ b/docs/models/components/chatcompletionfunctioncalloption.md
@@ -0,0 +1,11 @@
+# ChatCompletionFunctionCallOption
+
+Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `name` | *str* | :heavy_check_mark: | The name of the function to call. |
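+
+A minimal sketch (the function name is a placeholder):
+
+```python
+from log10.models import components
+
+# Force the model to call `my_function` rather than letting it choose.
+option = components.ChatCompletionFunctionCallOption(name='my_function')
+```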
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionfunctions.md b/docs/models/components/chatcompletionfunctions.md
new file mode 100644
index 0000000..6674257
--- /dev/null
+++ b/docs/models/components/chatcompletionfunctions.md
@@ -0,0 +1,12 @@
+# ~~ChatCompletionFunctions~~
+
+> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `name` | *str* | :heavy_check_mark: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. |
+| `description` | *Optional[str]* | :heavy_minus_sign: | A description of what the function does, used by the model to choose when and how to call the function. |
+| `parameters` | Dict[str, *Any*] | :heavy_minus_sign: | The parameters the function accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. Omitting `parameters` defines a function with an empty parameter list. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionmessagetoolcall.md b/docs/models/components/chatcompletionmessagetoolcall.md
new file mode 100644
index 0000000..f72cfae
--- /dev/null
+++ b/docs/models/components/chatcompletionmessagetoolcall.md
@@ -0,0 +1,10 @@
+# ChatCompletionMessageToolCall
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `id` | *str* | :heavy_check_mark: | The ID of the tool call. |
+| `type` | [components.ChatCompletionMessageToolCallType](../../models/components/chatcompletionmessagetoolcalltype.md) | :heavy_check_mark: | The type of the tool. Currently, only `function` is supported. |
+| `function` | [components.Function](../../models/components/function.md) | :heavy_check_mark: | The function that the model called. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionmessagetoolcalltype.md b/docs/models/components/chatcompletionmessagetoolcalltype.md
new file mode 100644
index 0000000..e78f969
--- /dev/null
+++ b/docs/models/components/chatcompletionmessagetoolcalltype.md
@@ -0,0 +1,10 @@
+# ChatCompletionMessageToolCallType
+
+The type of the tool. Currently, only `function` is supported.
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionnamedtoolchoice.md b/docs/models/components/chatcompletionnamedtoolchoice.md
new file mode 100644
index 0000000..7c36dba
--- /dev/null
+++ b/docs/models/components/chatcompletionnamedtoolchoice.md
@@ -0,0 +1,11 @@
+# ChatCompletionNamedToolChoice
+
+Specifies a tool the model should use. Use to force the model to call a specific function.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | [components.ChatCompletionNamedToolChoiceType](../../models/components/chatcompletionnamedtoolchoicetype.md) | :heavy_check_mark: | The type of the tool. Currently, only `function` is supported. |
+| `function` | [components.ChatCompletionNamedToolChoiceFunction](../../models/components/chatcompletionnamedtoolchoicefunction.md) | :heavy_check_mark: | N/A |
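+
+A minimal sketch (the tool name is a placeholder):
+
+```python
+from log10.models import components
+
+# Force the model to call the `get_weather` tool.
+tool_choice = components.ChatCompletionNamedToolChoice(
+    type=components.ChatCompletionNamedToolChoiceType.FUNCTION,
+    function=components.ChatCompletionNamedToolChoiceFunction(name='get_weather'),
+)
+```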
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionnamedtoolchoicefunction.md b/docs/models/components/chatcompletionnamedtoolchoicefunction.md
new file mode 100644
index 0000000..84ca480
--- /dev/null
+++ b/docs/models/components/chatcompletionnamedtoolchoicefunction.md
@@ -0,0 +1,8 @@
+# ChatCompletionNamedToolChoiceFunction
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `name` | *str* | :heavy_check_mark: | The name of the function to call. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionnamedtoolchoicetype.md b/docs/models/components/chatcompletionnamedtoolchoicetype.md
new file mode 100644
index 0000000..b338936
--- /dev/null
+++ b/docs/models/components/chatcompletionnamedtoolchoicetype.md
@@ -0,0 +1,10 @@
+# ChatCompletionNamedToolChoiceType
+
+The type of the tool. Currently, only `function` is supported.
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestassistantmessage.md b/docs/models/components/chatcompletionrequestassistantmessage.md
new file mode 100644
index 0000000..f6fd74a
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestassistantmessage.md
@@ -0,0 +1,12 @@
+# ChatCompletionRequestAssistantMessage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `role` | [components.ChatCompletionRequestAssistantMessageRole](../../models/components/chatcompletionrequestassistantmessagerole.md) | :heavy_check_mark: | The role of the messages author, in this case `assistant`. |
+| `content` | *Optional[str]* | :heavy_minus_sign: | The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified. |
+| `name` | *Optional[str]* | :heavy_minus_sign: | An optional name for the participant. Provides the model information to differentiate between participants of the same role. |
+| `tool_calls` | List[[components.ChatCompletionMessageToolCall](../../models/components/chatcompletionmessagetoolcall.md)] | :heavy_minus_sign: | The tool calls generated by the model, such as function calls. |
+| ~~`function_call`~~ | [Optional[components.FunctionCall]](../../models/components/functioncall.md) | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. |
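+
+A minimal sketch of an assistant turn (the content is illustrative):
+
+```python
+from log10.models import components
+
+# An assistant message carrying a plain-text reply.
+message = components.ChatCompletionRequestAssistantMessage(
+    role=components.ChatCompletionRequestAssistantMessageRole.ASSISTANT,
+    content='The weather in Boston is sunny.',
+)
+```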
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestassistantmessagerole.md b/docs/models/components/chatcompletionrequestassistantmessagerole.md
new file mode 100644
index 0000000..4898659
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestassistantmessagerole.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestAssistantMessageRole
+
+The role of the messages author, in this case `assistant`.
+
+
+## Values
+
+| Name | Value |
+| ----------- | ----------- |
+| `ASSISTANT` | assistant |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestfunctionmessage.md b/docs/models/components/chatcompletionrequestfunctionmessage.md
new file mode 100644
index 0000000..87285cb
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestfunctionmessage.md
@@ -0,0 +1,12 @@
+# ~~ChatCompletionRequestFunctionMessage~~
+
+> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `role` | [components.ChatCompletionRequestFunctionMessageRole](../../models/components/chatcompletionrequestfunctionmessagerole.md) | :heavy_check_mark: | The role of the messages author, in this case `function`. |
+| `content` | *Optional[str]* | :heavy_check_mark: | The contents of the function message. |
+| `name` | *str* | :heavy_check_mark: | The name of the function to call. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestfunctionmessagerole.md b/docs/models/components/chatcompletionrequestfunctionmessagerole.md
new file mode 100644
index 0000000..6bbfef6
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestfunctionmessagerole.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestFunctionMessageRole
+
+The role of the messages author, in this case `function`.
+
+
+## Values
+
+| Name | Value |
+| ---------- | ---------- |
+| `FUNCTION` | function |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestmessage.md b/docs/models/components/chatcompletionrequestmessage.md
new file mode 100644
index 0000000..a315176
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestmessage.md
@@ -0,0 +1,35 @@
+# ChatCompletionRequestMessage
+
+
+## Supported Types
+
+### ChatCompletionRequestSystemMessage
+
+```python
+chatCompletionRequestMessage: components.ChatCompletionRequestSystemMessage = ...  # values here
+```
+
+### ChatCompletionRequestUserMessage
+
+```python
+chatCompletionRequestMessage: components.ChatCompletionRequestUserMessage = ...  # values here
+```
+
+### ChatCompletionRequestAssistantMessage
+
+```python
+chatCompletionRequestMessage: components.ChatCompletionRequestAssistantMessage = ...  # values here
+```
+
+### ChatCompletionRequestToolMessage
+
+```python
+chatCompletionRequestMessage: components.ChatCompletionRequestToolMessage = ...  # values here
+```
+
+### ChatCompletionRequestFunctionMessage
+
+```python
+chatCompletionRequestMessage: components.ChatCompletionRequestFunctionMessage = ...  # values here
+```
+
diff --git a/docs/models/components/chatcompletionrequestmessagecontentpart.md b/docs/models/components/chatcompletionrequestmessagecontentpart.md
new file mode 100644
index 0000000..6a8d17d
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestmessagecontentpart.md
@@ -0,0 +1,17 @@
+# ChatCompletionRequestMessageContentPart
+
+
+## Supported Types
+
+### ChatCompletionRequestMessageContentPartText
+
+```python
+chatCompletionRequestMessageContentPart: components.ChatCompletionRequestMessageContentPartText = ...  # values here
+```
+
+### ChatCompletionRequestMessageContentPartImage
+
+```python
+chatCompletionRequestMessageContentPart: components.ChatCompletionRequestMessageContentPartImage = ...  # values here
+```
+
diff --git a/docs/models/components/chatcompletionrequestmessagecontentpartimage.md b/docs/models/components/chatcompletionrequestmessagecontentpartimage.md
new file mode 100644
index 0000000..c7ede4d
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestmessagecontentpartimage.md
@@ -0,0 +1,9 @@
+# ChatCompletionRequestMessageContentPartImage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | [components.ChatCompletionRequestMessageContentPartImageType](../../models/components/chatcompletionrequestmessagecontentpartimagetype.md) | :heavy_check_mark: | The type of the content part. |
+| `image_url` | [components.ImageURL](../../models/components/imageurl.md) | :heavy_check_mark: | N/A |
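+
+A sketch of a mixed text-and-image message payload. `components.Type.TEXT` and the `url` field on `components.ImageURL` are assumptions based on the linked docs, and the URL is a placeholder:
+
+```python
+from log10.models import components
+
+parts = [
+    components.ChatCompletionRequestMessageContentPartText(
+        type=components.Type.TEXT,
+        text='What is in this image?',
+    ),
+    components.ChatCompletionRequestMessageContentPartImage(
+        type=components.ChatCompletionRequestMessageContentPartImageType.IMAGE_URL,
+        image_url=components.ImageURL(url='https://example.com/image.png'),
+    ),
+]
+```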
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestmessagecontentpartimagetype.md b/docs/models/components/chatcompletionrequestmessagecontentpartimagetype.md
new file mode 100644
index 0000000..86f84a5
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestmessagecontentpartimagetype.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestMessageContentPartImageType
+
+The type of the content part.
+
+
+## Values
+
+| Name | Value |
+| ----------- | ----------- |
+| `IMAGE_URL` | image_url |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestmessagecontentparttext.md b/docs/models/components/chatcompletionrequestmessagecontentparttext.md
new file mode 100644
index 0000000..865816e
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestmessagecontentparttext.md
@@ -0,0 +1,9 @@
+# ChatCompletionRequestMessageContentPartText
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `type` | [components.Type](../../models/components/type.md) | :heavy_check_mark: | The type of the content part. |
+| `text` | *str* | :heavy_check_mark: | The text content. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestsystemmessage.md b/docs/models/components/chatcompletionrequestsystemmessage.md
new file mode 100644
index 0000000..064b2c5
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestsystemmessage.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestSystemMessage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `content` | *str* | :heavy_check_mark: | The contents of the system message. |
+| `role` | [components.Role](../../models/components/role.md) | :heavy_check_mark: | The role of the messages author, in this case `system`. |
+| `name` | *Optional[str]* | :heavy_minus_sign: | An optional name for the participant. Provides the model information to differentiate between participants of the same role. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequesttoolmessage.md b/docs/models/components/chatcompletionrequesttoolmessage.md
new file mode 100644
index 0000000..847b4c3
--- /dev/null
+++ b/docs/models/components/chatcompletionrequesttoolmessage.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestToolMessage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `role` | [components.ChatCompletionRequestToolMessageRole](../../models/components/chatcompletionrequesttoolmessagerole.md) | :heavy_check_mark: | The role of the messages author, in this case `tool`. |
+| `content` | *str* | :heavy_check_mark: | The contents of the tool message. |
+| `tool_call_id` | *str* | :heavy_check_mark: | Tool call that this message is responding to. |
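+
+A sketch of answering a prior tool call (the ID and content are placeholders):
+
+```python
+from log10.models import components
+
+# Return a tool result for the tool call with ID `call_abc123`.
+message = components.ChatCompletionRequestToolMessage(
+    role=components.ChatCompletionRequestToolMessageRole.TOOL,
+    content='{"temperature": 22}',
+    tool_call_id='call_abc123',
+)
+```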
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequesttoolmessagerole.md b/docs/models/components/chatcompletionrequesttoolmessagerole.md
new file mode 100644
index 0000000..63a56c5
--- /dev/null
+++ b/docs/models/components/chatcompletionrequesttoolmessagerole.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestToolMessageRole
+
+The role of the messages author, in this case `tool`.
+
+
+## Values
+
+| Name | Value |
+| ------ | ------ |
+| `TOOL` | tool |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestusermessage.md b/docs/models/components/chatcompletionrequestusermessage.md
new file mode 100644
index 0000000..a6de2c3
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestusermessage.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestUserMessage
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `content` | [components.Content](../../models/components/content.md) | :heavy_check_mark: | The contents of the user message. |
+| `role` | [components.ChatCompletionRequestUserMessageRole](../../models/components/chatcompletionrequestusermessagerole.md) | :heavy_check_mark: | The role of the messages author, in this case `user`. |
+| `name` | *Optional[str]* | :heavy_minus_sign: | An optional name for the participant. Provides the model information to differentiate between participants of the same role. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionrequestusermessagerole.md b/docs/models/components/chatcompletionrequestusermessagerole.md
new file mode 100644
index 0000000..4f0691c
--- /dev/null
+++ b/docs/models/components/chatcompletionrequestusermessagerole.md
@@ -0,0 +1,10 @@
+# ChatCompletionRequestUserMessageRole
+
+The role of the messages author, in this case `user`.
+
+
+## Values
+
+| Name | Value |
+| ------ | ------ |
+| `USER` | user |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionresponsemessage.md b/docs/models/components/chatcompletionresponsemessage.md
new file mode 100644
index 0000000..3afa735
--- /dev/null
+++ b/docs/models/components/chatcompletionresponsemessage.md
@@ -0,0 +1,13 @@
+# ChatCompletionResponseMessage
+
+A chat completion message generated by the model.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `content` | *Optional[str]* | :heavy_check_mark: | The contents of the message. |
+| `role` | [components.ChatCompletionResponseMessageRole](../../models/components/chatcompletionresponsemessagerole.md) | :heavy_check_mark: | The role of the author of this message. |
+| `tool_calls` | List[[components.ChatCompletionMessageToolCall](../../models/components/chatcompletionmessagetoolcall.md)] | :heavy_minus_sign: | The tool calls generated by the model, such as function calls. |
+| ~~`function_call`~~ | [Optional[components.ChatCompletionResponseMessageFunctionCall]](../../models/components/chatcompletionresponsemessagefunctioncall.md) | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionresponsemessagefunctioncall.md b/docs/models/components/chatcompletionresponsemessagefunctioncall.md
new file mode 100644
index 0000000..c59a46b
--- /dev/null
+++ b/docs/models/components/chatcompletionresponsemessagefunctioncall.md
@@ -0,0 +1,13 @@
+# ~~ChatCompletionResponseMessageFunctionCall~~
+
+Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model.
+
+> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `arguments` | *str* | :heavy_check_mark: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. |
+| `name` | *str* | :heavy_check_mark: | The name of the function to call. |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionresponsemessagerole.md b/docs/models/components/chatcompletionresponsemessagerole.md
new file mode 100644
index 0000000..52a9d93
--- /dev/null
+++ b/docs/models/components/chatcompletionresponsemessagerole.md
@@ -0,0 +1,10 @@
+# ChatCompletionResponseMessageRole
+
+The role of the author of this message.
+
+
+## Values
+
+| Name | Value |
+| ----------- | ----------- |
+| `ASSISTANT` | assistant |
\ No newline at end of file
diff --git a/docs/models/components/chatcompletionstreamoptions.md b/docs/models/components/chatcompletionstreamoptions.md
new file mode 100644
index 0000000..1c8b563
--- /dev/null
+++ b/docs/models/components/chatcompletionstreamoptions.md
@@ -0,0 +1,11 @@
+# ChatCompletionStreamOptions
+
+Options for streaming response. Only set this when you set `stream: true`.
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| --- | --- | --- | --- |
+| `include_usage` | *Optional[bool]* | :heavy_minus_sign: | If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. |
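+
+A minimal sketch:
+
+```python
+from log10.models import components
+
+# Request a final usage chunk at the end of the stream.
+stream_options = components.ChatCompletionStreamOptions(include_usage=True)
+```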
| \ No newline at end of file diff --git a/docs/models/components/chatcompletiontokenlogprob.md b/docs/models/components/chatcompletiontokenlogprob.md new file mode 100644 index 0000000..db7942e --- /dev/null +++ b/docs/models/components/chatcompletiontokenlogprob.md @@ -0,0 +1,11 @@ +# ChatCompletionTokenLogprob + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `token` | *str* | :heavy_check_mark: | The token. | +| `logprob` | *float* | :heavy_check_mark: | The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. | +| `bytes` | List[*int*] | :heavy_check_mark: | A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. | +| `top_logprobs` | List[[components.TopLogprobs](../../models/components/toplogprobs.md)] | :heavy_check_mark: | List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. | \ No newline at end of file diff --git a/docs/models/components/chatcompletiontool.md b/docs/models/components/chatcompletiontool.md new file mode 100644 index 0000000..95039bd --- /dev/null +++ b/docs/models/components/chatcompletiontool.md @@ -0,0 +1,9 @@ +# ChatCompletionTool + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `type` | [components.ChatCompletionToolType](../../models/components/chatcompletiontooltype.md) | :heavy_check_mark: | The type of the tool. Currently, only `function` is supported. 
| +| `function` | [components.FunctionObject](../../models/components/functionobject.md) | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/components/chatcompletiontoolchoiceoption.md b/docs/models/components/chatcompletiontoolchoiceoption.md new file mode 100644 index 0000000..585c4cc --- /dev/null +++ b/docs/models/components/chatcompletiontoolchoiceoption.md @@ -0,0 +1,26 @@ +# ChatCompletionToolChoiceOption + +Controls which (if any) tool is called by the model. +`none` means the model will not call any tool and instead generates a message. +`auto` means the model can pick between generating a message or calling one or more tools. +`required` means the model must call one or more tools. +Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + +`none` is the default when no tools are present. `auto` is the default if tools are present. + + + +## Supported Types + +### ChatCompletionToolChoiceOption1 + +```python +chatCompletionToolChoiceOption: components.ChatCompletionToolChoiceOption1 = /* values here */ +``` + +### ChatCompletionNamedToolChoice + +```python +chatCompletionToolChoiceOption: components.ChatCompletionNamedToolChoice = /* values here */ +``` + diff --git a/docs/models/components/chatcompletiontoolchoiceoption1.md b/docs/models/components/chatcompletiontoolchoiceoption1.md new file mode 100644 index 0000000..68fc7e5 --- /dev/null +++ b/docs/models/components/chatcompletiontoolchoiceoption1.md @@ -0,0 +1,13 @@ +# ChatCompletionToolChoiceOption1 + +`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools. + + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `NONE` | none | +| `AUTO` | auto | +| `REQUIRED` | required | \ No newline at end of file diff --git a/docs/models/components/chatcompletiontooltype.md b/docs/models/components/chatcompletiontooltype.md new file mode 100644 index 0000000..6510992 --- /dev/null +++ b/docs/models/components/chatcompletiontooltype.md @@ -0,0 +1,10 @@ +# ChatCompletionToolType + +The type of the tool. Currently, only `function` is supported. 
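For orientation, a hypothetical sketch of declaring a function tool with this type (the class and enum names come from this patch; constructor keywords are assumed):

```python
from log10.models import components

# Hypothetical sketch: a single function tool the model may call.
tool = components.ChatCompletionTool(
    type=components.ChatCompletionToolType.FUNCTION,
    function=components.FunctionObject(
        name="get_weather",
        description="Return the current weather for a city.",
        parameters={
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    ),
)
```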
+ + +## Values + +| Name | Value | +| ---------- | ---------- | +| `FUNCTION` | function | \ No newline at end of file diff --git a/docs/models/components/choices.md b/docs/models/components/choices.md new file mode 100644 index 0000000..25d1dfe --- /dev/null +++ b/docs/models/components/choices.md @@ -0,0 +1,11 @@ +# Choices + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `finish_reason` | [components.FinishReason](../../models/components/finishreason.md) | :heavy_check_mark: | The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
`length` if the maximum number of tokens specified in the request was reached,
`content_filter` if content was omitted due to a flag from our content filters,
`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.
| +| `index` | *int* | :heavy_check_mark: | The index of the choice in the list of choices. | +| `message` | [components.ChatCompletionResponseMessage](../../models/components/chatcompletionresponsemessage.md) | :heavy_check_mark: | A chat completion message generated by the model. | +| `logprobs` | [Optional[components.Logprobs]](../../models/components/logprobs.md) | :heavy_check_mark: | Log probability information for the choice. | \ No newline at end of file diff --git a/docs/models/components/completion.md b/docs/models/components/completion.md new file mode 100644 index 0000000..d0fd3b4 --- /dev/null +++ b/docs/models/components/completion.md @@ -0,0 +1,19 @@ +# Completion + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------ | +| `organization_id` | *str* | :heavy_check_mark: | The unique identifier for the organization. | +| `id` | *Optional[str]* | :heavy_minus_sign: | The unique identifier for this task. | +| `kind` | [Optional[components.Kind]](../../models/components/kind.md) | :heavy_minus_sign: | The kind of completion i.e. chat messages or prompt | +| `status` | [Optional[components.Status]](../../models/components/status.md) | :heavy_minus_sign: | The status of this completion. | +| `tags` | List[*str*] | :heavy_minus_sign: | The tags for this completion. | +| `request` | [Optional[components.CreateChatCompletionRequest]](../../models/components/createchatcompletionrequest.md) | :heavy_minus_sign: | N/A | +| `response` | [Optional[components.CreateChatCompletionResponse]](../../models/components/createchatcompletionresponse.md) | :heavy_minus_sign: | Represents a chat completion response returned by model, based on the provided input. | +| `stacktrace` | List[[components.Stacktrace](../../models/components/stacktrace.md)] | :heavy_minus_sign: | The stacktrace for this completion. | +| `session_id` | *Optional[str]* | :heavy_minus_sign: | The session id for this completion. | +| `duration` | *Optional[float]* | :heavy_minus_sign: | The duration of this completion in seconds. | +| `failure_kind` | *Optional[str]* | :heavy_minus_sign: | The failure kind of this completion. | +| `failure_reason` | *Optional[str]* | :heavy_minus_sign: | The failure reason of this completion. | \ No newline at end of file diff --git a/docs/models/components/completiontagsselector.md b/docs/models/components/completiontagsselector.md new file mode 100644 index 0000000..8644225 --- /dev/null +++ b/docs/models/components/completiontagsselector.md @@ -0,0 +1,9 @@ +# CompletionTagsSelector + +The completion tag matching with this task i.e. surfaced as needing feedback. + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/components/completionusage.md b/docs/models/components/completionusage.md new file mode 100644 index 0000000..d204e5d --- /dev/null +++ b/docs/models/components/completionusage.md @@ -0,0 +1,12 @@ +# CompletionUsage + +Usage statistics for the completion request. 
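A quick illustration of how the counts relate (the invariant is stated in the table below; constructor keywords are assumed from this patch):

```python
from log10.models import components

# Illustrative only: total_tokens = prompt_tokens + completion_tokens.
usage = components.CompletionUsage(
    completion_tokens=42,
    prompt_tokens=58,
    total_tokens=100,
)
assert usage.total_tokens == usage.prompt_tokens + usage.completion_tokens
```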
+ + +## Fields + +| Field | Type | Required | Description | +| ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | ----------------------------------------------------------------- | +| `completion_tokens` | *int* | :heavy_check_mark: | Number of tokens in the generated completion. | +| `prompt_tokens` | *int* | :heavy_check_mark: | Number of tokens in the prompt. | +| `total_tokens` | *int* | :heavy_check_mark: | Total number of tokens used in the request (prompt + completion). | \ No newline at end of file diff --git a/docs/models/components/content.md b/docs/models/components/content.md new file mode 100644 index 0000000..098f59f --- /dev/null +++ b/docs/models/components/content.md @@ -0,0 +1,20 @@ +# Content + +The contents of the user message. + + + +## Supported Types + +### + +```python +content: str = /* values here */ +``` + +### + +```python +content: List[components.ChatCompletionRequestMessageContentPart] = /* values here */ +``` + diff --git a/docs/models/components/createchatcompletionrequest.md b/docs/models/components/createchatcompletionrequest.md new file mode 100644 index 0000000..3bd6281 --- /dev/null +++ b/docs/models/components/createchatcompletionrequest.md @@ -0,0 +1,28 @@ +# CreateChatCompletionRequest + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `messages` | List[[components.ChatCompletionRequestMessage](../../models/components/chatcompletionrequestmessage.md)] | :heavy_check_mark: | A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). | | +| `model` | [components.Model](../../models/components/model.md) | :heavy_check_mark: | ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. | gpt-4-turbo | +| `frequency_penalty` | *Optional[float]* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.

[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
| | +| `logit_bias` | Dict[str, *int*] | :heavy_minus_sign: | Modify the likelihood of specified tokens appearing in the completion.

Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
| | +| `logprobs` | *Optional[bool]* | :heavy_minus_sign: | Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`. | | +| `top_logprobs` | *Optional[int]* | :heavy_minus_sign: | An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. | | +| `max_tokens` | *Optional[int]* | :heavy_minus_sign: | The maximum number of [tokens](/tokenizer) that can be generated in the chat completion.

The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
| | +| `n` | *Optional[int]* | :heavy_minus_sign: | How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. | 1 | +| `presence_penalty` | *Optional[float]* | :heavy_minus_sign: | Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics.

[See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
| | +| `response_format` | [Optional[components.ResponseFormat]](../../models/components/responseformat.md) | :heavy_minus_sign: | An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`.

Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON.

**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length.
| | +| `seed` | *Optional[int]* | :heavy_minus_sign: | This feature is in Beta.
If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result.
Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend.
| | +| `stop` | [Optional[components.Stop]](../../models/components/stop.md) | :heavy_minus_sign: | Up to 4 sequences where the API will stop generating further tokens.
| | +| `stream` | *Optional[bool]* | :heavy_minus_sign: | If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).
| | +| `stream_options` | [Optional[components.ChatCompletionStreamOptions]](../../models/components/chatcompletionstreamoptions.md) | :heavy_minus_sign: | Options for streaming response. Only set this when you set `stream: true`.
| | +| `temperature` | *Optional[float]* | :heavy_minus_sign: | What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic.

We generally recommend altering this or `top_p` but not both.
| 1 | +| `top_p` | *Optional[float]* | :heavy_minus_sign: | An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered.

We generally recommend altering this or `temperature` but not both.
| 1 | +| `tools` | List[[components.ChatCompletionTool](../../models/components/chatcompletiontool.md)] | :heavy_minus_sign: | A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.
| | +| `tool_choice` | [Optional[components.ChatCompletionToolChoiceOption]](../../models/components/chatcompletiontoolchoiceoption.md) | :heavy_minus_sign: | Controls which (if any) tool is called by the model.
`none` means the model will not call any tool and instead generates a message.
`auto` means the model can pick between generating a message or calling one or more tools.
`required` means the model must call one or more tools.
Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.

`none` is the default when no tools are present. `auto` is the default if tools are present.
| | +| `user` | *Optional[str]* | :heavy_minus_sign: | A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
| user-1234 | +| ~~`function_call`~~ | [Optional[components.CreateChatCompletionRequestFunctionCall]](../../models/components/createchatcompletionrequestfunctioncall.md) | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.

Deprecated in favor of `tool_choice`.

Controls which (if any) function is called by the model.
`none` means the model will not call a function and instead generates a message.
`auto` means the model can pick between generating a message or calling a function.
Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.

`none` is the default when no functions are present. `auto` is the default if functions are present.
| | +| ~~`functions`~~ | List[[components.ChatCompletionFunctions](../../models/components/chatcompletionfunctions.md)] | :heavy_minus_sign: | :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible.

Deprecated in favor of `tools`.

A list of functions the model may generate JSON inputs for.
| | \ No newline at end of file diff --git a/docs/models/components/createchatcompletionrequestfunctioncall.md b/docs/models/components/createchatcompletionrequestfunctioncall.md new file mode 100644 index 0000000..94d0ad5 --- /dev/null +++ b/docs/models/components/createchatcompletionrequestfunctioncall.md @@ -0,0 +1,29 @@ +# ~~CreateChatCompletionRequestFunctionCall~~ + +Deprecated in favor of `tool_choice`. + +Controls which (if any) function is called by the model. +`none` means the model will not call a function and instead generates a message. +`auto` means the model can pick between generating a message or calling a function. +Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + +`none` is the default when no functions are present. `auto` is the default if functions are present. + + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + + +## Supported Types + +### One + +```python +createChatCompletionRequestFunctionCall: components.One = /* values here */ +``` + +### ChatCompletionFunctionCallOption + +```python +createChatCompletionRequestFunctionCall: components.ChatCompletionFunctionCallOption = /* values here */ +``` + diff --git a/docs/models/components/createchatcompletionrequesttype.md b/docs/models/components/createchatcompletionrequesttype.md new file mode 100644 index 0000000..2516d58 --- /dev/null +++ b/docs/models/components/createchatcompletionrequesttype.md @@ -0,0 +1,11 @@ +# CreateChatCompletionRequestType + +Must be one of `text` or `json_object`. + + +## Values + +| Name | Value | +| ------------- | ------------- | +| `TEXT` | text | +| `JSON_OBJECT` | json_object | \ No newline at end of file diff --git a/docs/models/components/createchatcompletionresponse.md b/docs/models/components/createchatcompletionresponse.md new file mode 100644 index 0000000..e68de04 --- /dev/null +++ b/docs/models/components/createchatcompletionresponse.md @@ -0,0 +1,16 @@ +# CreateChatCompletionResponse + +Represents a chat completion response returned by model, based on the provided input. + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `id` | *str* | :heavy_check_mark: | A unique identifier for the chat completion. | +| `choices` | List[[components.Choices](../../models/components/choices.md)] | :heavy_check_mark: | A list of chat completion choices. Can be more than one if `n` is greater than 1. | +| `created` | *int* | :heavy_check_mark: | The Unix timestamp (in seconds) of when the chat completion was created. 
| +| `model` | *str* | :heavy_check_mark: | The model used for the chat completion. | +| `object` | [components.Object](../../models/components/object.md) | :heavy_check_mark: | The object type, which is always `chat.completion`. | +| `system_fingerprint` | *Optional[str]* | :heavy_minus_sign: | This fingerprint represents the backend configuration that the model runs with.

Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
| +| `usage` | [Optional[components.CompletionUsage]](../../models/components/completionusage.md) | :heavy_minus_sign: | Usage statistics for the completion request. | \ No newline at end of file diff --git a/docs/models/components/detail.md b/docs/models/components/detail.md new file mode 100644 index 0000000..64193ae --- /dev/null +++ b/docs/models/components/detail.md @@ -0,0 +1,12 @@ +# Detail + +Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + + +## Values + +| Name | Value | +| ------ | ------ | +| `AUTO` | auto | +| `LOW` | low | +| `HIGH` | high | \ No newline at end of file diff --git a/docs/models/components/feedback.md b/docs/models/components/feedback.md new file mode 100644 index 0000000..53c9350 --- /dev/null +++ b/docs/models/components/feedback.md @@ -0,0 +1,14 @@ +# Feedback + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `task_id` | *str* | :heavy_check_mark: | The unique identifier for the task associated with this feedback. | +| `json_values` | [components.JSONValues](../../models/components/jsonvalues.md) | :heavy_check_mark: | The values of the feedback. Must be valid JSON according to the task schema. | +| `matched_completion_ids` | List[*str*] | :heavy_check_mark: | The matched completion ids associated with this feedback. | +| `comment` | *str* | :heavy_check_mark: | The comment associated with this feedback. | +| `id` | *Optional[str]* | :heavy_minus_sign: | The unique identifier for this feedback. | +| `created_at_ms` | *Optional[float]* | :heavy_minus_sign: | The epoch this schema was created. | +| `completions_summary` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/components/finishreason.md b/docs/models/components/finishreason.md new file mode 100644 index 0000000..b17e41b --- /dev/null +++ b/docs/models/components/finishreason.md @@ -0,0 +1,18 @@ +# FinishReason + +The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, +`length` if the maximum number of tokens specified in the request was reached, +`content_filter` if content was omitted due to a flag from our content filters, +`tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + + + +## Values + +| Name | Value | +| ---------------- | ---------------- | +| `STOP` | stop | +| `LENGTH` | length | +| `TOOL_CALLS` | tool_calls | +| `CONTENT_FILTER` | content_filter | +| `FUNCTION_CALL` | function_call | \ No newline at end of file diff --git a/docs/models/components/function.md b/docs/models/components/function.md new file mode 100644 index 0000000..a5409e7 --- /dev/null +++ b/docs/models/components/function.md @@ -0,0 +1,11 @@ +# Function + +The function that the model called. 
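Since `arguments` arrives as a model-generated JSON string, the fields below recommend validating it before use. A small self-contained sketch of that defensive parse (plain standard library, not SDK code):

```python
import json

def parse_arguments(arguments: str) -> dict:
    """Parse a model-generated `arguments` string, tolerating bad JSON."""
    try:
        parsed = json.loads(arguments)
    except json.JSONDecodeError:
        return {}  # the model emitted invalid JSON; treat as no arguments
    return parsed if isinstance(parsed, dict) else {}
```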
+ + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `name` | *str* | :heavy_check_mark: | The name of the function to call. | +| `arguments` | *str* | :heavy_check_mark: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. | \ No newline at end of file diff --git a/docs/models/components/functioncall.md b/docs/models/components/functioncall.md new file mode 100644 index 0000000..da6eae4 --- /dev/null +++ b/docs/models/components/functioncall.md @@ -0,0 +1,13 @@ +# ~~FunctionCall~~ + +Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + +> :warning: **DEPRECATED**: This will be removed in a future release, please migrate away from it as soon as possible. + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| `arguments` | *str* | :heavy_check_mark: | The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. 
Validate the arguments in your code before calling your function. | +| `name` | *str* | :heavy_check_mark: | The name of the function to call. | \ No newline at end of file diff --git a/docs/models/components/functionobject.md b/docs/models/components/functionobject.md new file mode 100644 index 0000000..d4c0d65 --- /dev/null +++ b/docs/models/components/functionobject.md @@ -0,0 +1,10 @@ +# FunctionObject + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `name` | *str* | :heavy_check_mark: | The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64. | +| `description` | *Optional[str]* | :heavy_minus_sign: | A description of what the function does, used by the model to choose when and how to call the function. | +| `parameters` | Dict[str, *Any*] | :heavy_minus_sign: | The parameters the functions accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format.

Omitting `parameters` defines a function with an empty parameter list. | \ No newline at end of file diff --git a/docs/models/components/httpmetadata.md b/docs/models/components/httpmetadata.md new file mode 100644 index 0000000..5febcce --- /dev/null +++ b/docs/models/components/httpmetadata.md @@ -0,0 +1,9 @@ +# HTTPMetadata + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------- | +| `response` | [requests.Response](https://requests.readthedocs.io/en/latest/api/#requests.Response) | :heavy_check_mark: | Raw HTTP response; suitable for custom response parsing | +| `request` | [requests.Request](https://requests.readthedocs.io/en/latest/api/#requests.Request) | :heavy_check_mark: | Raw HTTP request; suitable for debugging | \ No newline at end of file diff --git a/docs/models/components/imageurl.md b/docs/models/components/imageurl.md new file mode 100644 index 0000000..88cd56c --- /dev/null +++ b/docs/models/components/imageurl.md @@ -0,0 +1,9 @@ +# ImageURL + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------- | +| `url` | *str* | :heavy_check_mark: | Either a URL of the image or the base64 encoded image data. | +| `detail` | [Optional[components.Detail]](../../models/components/detail.md) | :heavy_minus_sign: | Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). | \ No newline at end of file diff --git a/docs/models/components/jsonschema.md b/docs/models/components/jsonschema.md new file mode 100644 index 0000000..f807e6f --- /dev/null +++ b/docs/models/components/jsonschema.md @@ -0,0 +1,9 @@ +# JSONSchema + +The schema of the task. Must be valid JSON Schema. + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/components/jsonvalues.md b/docs/models/components/jsonvalues.md new file mode 100644 index 0000000..ec0c8be --- /dev/null +++ b/docs/models/components/jsonvalues.md @@ -0,0 +1,9 @@ +# JSONValues + +The values of the feedback. Must be valid JSON according to the task schema. + + +## Fields + +| Field | Type | Required | Description | +| ----------- | ----------- | ----------- | ----------- | \ No newline at end of file diff --git a/docs/models/components/kind.md b/docs/models/components/kind.md new file mode 100644 index 0000000..56c3cce --- /dev/null +++ b/docs/models/components/kind.md @@ -0,0 +1,11 @@ +# Kind + +The kind of completion i.e. 
chat messages or prompt + + +## Values + +| Name | Value | +| -------- | -------- | +| `CHAT` | chat | +| `PROMPT` | prompt | \ No newline at end of file diff --git a/docs/models/components/logprobs.md b/docs/models/components/logprobs.md new file mode 100644 index 0000000..292c00b --- /dev/null +++ b/docs/models/components/logprobs.md @@ -0,0 +1,10 @@ +# Logprobs + +Log probability information for the choice. + + +## Fields + +| Field | Type | Required | Description | +| ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | +| `content` | List[[components.ChatCompletionTokenLogprob](../../models/components/chatcompletiontokenlogprob.md)] | :heavy_check_mark: | A list of message content tokens with log probability information. | \ No newline at end of file diff --git a/docs/models/components/model.md b/docs/models/components/model.md new file mode 100644 index 0000000..fae6cf8 --- /dev/null +++ b/docs/models/components/model.md @@ -0,0 +1,19 @@ +# Model + +ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + + +## Supported Types + +### + +```python +model: str = /* values here */ +``` + +### Two + +```python +model: components.Two = /* values here */ +``` + diff --git a/docs/models/components/object.md b/docs/models/components/object.md new file mode 100644 index 0000000..48ee4f7 --- /dev/null +++ b/docs/models/components/object.md @@ -0,0 +1,10 @@ +# Object + +The object type, which is always `chat.completion`. + + +## Values + +| Name | Value | +| ----------------- | ----------------- | +| `CHAT_COMPLETION` | chat.completion | \ No newline at end of file diff --git a/docs/models/components/one.md b/docs/models/components/one.md new file mode 100644 index 0000000..4cef8c4 --- /dev/null +++ b/docs/models/components/one.md @@ -0,0 +1,12 @@ +# One + +`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function. + + + +## Values + +| Name | Value | +| ------ | ------ | +| `NONE` | none | +| `AUTO` | auto | \ No newline at end of file diff --git a/docs/models/components/responseformat.md b/docs/models/components/responseformat.md new file mode 100644 index 0000000..18ebda5 --- /dev/null +++ b/docs/models/components/responseformat.md @@ -0,0 +1,15 @@ +# ResponseFormat + +An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + +Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + +**Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. 
Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + + + +## Fields + +| Field | Type | Required | Description | Example | +| ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------ | +| `type` | [Optional[components.CreateChatCompletionRequestType]](../../models/components/createchatcompletionrequesttype.md) | :heavy_minus_sign: | Must be one of `text` or `json_object`. | json_object | \ No newline at end of file diff --git a/docs/models/components/role.md b/docs/models/components/role.md new file mode 100644 index 0000000..f26f8b1 --- /dev/null +++ b/docs/models/components/role.md @@ -0,0 +1,10 @@ +# Role + +The role of the messages author, in this case `system`. + + +## Values + +| Name | Value | +| -------- | -------- | +| `SYSTEM` | system | \ No newline at end of file diff --git a/docs/models/components/security.md b/docs/models/components/security.md new file mode 100644 index 0000000..8e61b40 --- /dev/null +++ b/docs/models/components/security.md @@ -0,0 +1,8 @@ +# Security + + +## Fields + +| Field | Type | Required | Description | +| ------------------ | ------------------ | ------------------ | ------------------ | +| `log10_token` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/components/session.md b/docs/models/components/session.md new file mode 100644 index 0000000..0be58bd --- /dev/null +++ b/docs/models/components/session.md @@ -0,0 +1,8 @@ +# Session + + +## Fields + +| Field | Type | Required | Description | +| --------------------------------------- | --------------------------------------- | --------------------------------------- | --------------------------------------- | +| `id` | *Optional[str]* | :heavy_minus_sign: | The unique identifier for this session. | \ No newline at end of file diff --git a/docs/models/components/stacktrace.md b/docs/models/components/stacktrace.md new file mode 100644 index 0000000..78a85cd --- /dev/null +++ b/docs/models/components/stacktrace.md @@ -0,0 +1,11 @@ +# Stacktrace + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | ------------------------------------------------------- | +| `file` | *str* | :heavy_check_mark: | The file associated with this stacktrace. | +| `line` | *str* | :heavy_check_mark: | The line associated with this stacktrace. | +| `lineno` | *float* | :heavy_check_mark: | The line number associated with this stacktrace. | +| `name` | *str* | :heavy_check_mark: | The function or module associated with this stacktrace. 
| \ No newline at end of file diff --git a/docs/models/components/status.md b/docs/models/components/status.md new file mode 100644 index 0000000..f97f8eb --- /dev/null +++ b/docs/models/components/status.md @@ -0,0 +1,12 @@ +# Status + +The status of this completion. + + +## Values + +| Name | Value | +| ---------- | ---------- | +| `STARTED` | started | +| `FINISHED` | finished | +| `FAILED` | failed | \ No newline at end of file diff --git a/docs/models/components/stop.md b/docs/models/components/stop.md new file mode 100644 index 0000000..9d4a37b --- /dev/null +++ b/docs/models/components/stop.md @@ -0,0 +1,20 @@ +# Stop + +Up to 4 sequences where the API will stop generating further tokens. + + + +## Supported Types + +### + +```python +stop: str = /* values here */ +``` + +### + +```python +stop: List[str] = /* values here */ +``` + diff --git a/docs/models/components/task.md b/docs/models/components/task.md new file mode 100644 index 0000000..6504fe6 --- /dev/null +++ b/docs/models/components/task.md @@ -0,0 +1,13 @@ +# Task + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------- | +| `json_schema` | [components.JSONSchema](../../models/components/jsonschema.md) | :heavy_check_mark: | The schema of the task. Must be valid JSON Schema. | +| `name` | *str* | :heavy_check_mark: | The name of the task. | +| `instruction` | *str* | :heavy_check_mark: | The instructions for this task. | +| `completion_tags_selector` | [components.CompletionTagsSelector](../../models/components/completiontagsselector.md) | :heavy_check_mark: | The completion tag matching with this task i.e. surfaced as needing feedback. | +| `id` | *Optional[str]* | :heavy_minus_sign: | The unique identifier for this task. | +| `created_at_ms` | *Optional[float]* | :heavy_minus_sign: | The epoch this schema was created. 
| \ No newline at end of file diff --git a/docs/models/components/toplogprobs.md b/docs/models/components/toplogprobs.md new file mode 100644 index 0000000..17e8a3c --- /dev/null +++ b/docs/models/components/toplogprobs.md @@ -0,0 +1,10 @@ +# TopLogprobs + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | +| `token` | *str* | :heavy_check_mark: | The token. | +| `logprob` | *float* | :heavy_check_mark: | The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. | +| `bytes` | List[*int*] | :heavy_check_mark: | A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. | \ No newline at end of file diff --git a/docs/models/components/two.md b/docs/models/components/two.md new file mode 100644 index 0000000..598cf02 --- /dev/null +++ b/docs/models/components/two.md @@ -0,0 +1,26 @@ +# Two + + +## Values + +| Name | Value | +| ------------------------ | ------------------------ | +| `GPT_4_TURBO` | gpt-4-turbo | +| `GPT_4_TURBO_2024_04_09` | gpt-4-turbo-2024-04-09 | +| `GPT_4_0125_PREVIEW` | gpt-4-0125-preview | +| `GPT_4_TURBO_PREVIEW` | gpt-4-turbo-preview | +| `GPT_4_1106_PREVIEW` | gpt-4-1106-preview | +| `GPT_4_VISION_PREVIEW` | gpt-4-vision-preview | +| `GPT_4` | gpt-4 | +| `GPT_4_0314` | gpt-4-0314 | +| `GPT_4_0613` | gpt-4-0613 | +| `GPT_4_32K` | gpt-4-32k | +| `GPT_4_32K_0314` | gpt-4-32k-0314 | +| `GPT_4_32K_0613` | gpt-4-32k-0613 | +| `GPT_3_5_TURBO` | gpt-3.5-turbo | +| `GPT_3_5_TURBO_16K` | gpt-3.5-turbo-16k | +| `GPT_3_5_TURBO_0301` | gpt-3.5-turbo-0301 | +| `GPT_3_5_TURBO_0613` | gpt-3.5-turbo-0613 | +| `GPT_3_5_TURBO_1106` | gpt-3.5-turbo-1106 | +| `GPT_3_5_TURBO_0125` | gpt-3.5-turbo-0125 | +| `GPT_3_5_TURBO_16K_0613` | gpt-3.5-turbo-16k-0613 | \ No newline at end of file diff --git a/docs/models/components/type.md b/docs/models/components/type.md new file mode 100644 index 0000000..12481b9 --- /dev/null +++ b/docs/models/components/type.md @@ -0,0 +1,10 @@ +# Type + +The type of the content part. 
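A hypothetical sketch of a text content part using this enum (field names are assumed from the content-part docs in this patch):

```python
from log10.models import components

# Hypothetical sketch: a `text` content part for a user message.
part = components.ChatCompletionRequestMessageContentPartText(
    type=components.Type.TEXT,
    text="Summarize the attached report.",
)
```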
+ + +## Values + +| Name | Value | +| ------ | ------ | +| `TEXT` | text | \ No newline at end of file diff --git a/docs/models/internal/globals.md b/docs/models/internal/globals.md new file mode 100644 index 0000000..4e068bf --- /dev/null +++ b/docs/models/internal/globals.md @@ -0,0 +1,8 @@ +# Globals + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `x_log10_organization` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createfeedbacktaskresponse.md b/docs/models/operations/createfeedbacktaskresponse.md new file mode 100644 index 0000000..152c19b --- /dev/null +++ b/docs/models/operations/createfeedbacktaskresponse.md @@ -0,0 +1,9 @@ +# CreateFeedbackTaskResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A | +| `task` | [Optional[components.Task]](../../models/components/task.md) | :heavy_minus_sign: | OK | \ No newline at end of file diff --git a/docs/models/operations/createglobals.md b/docs/models/operations/createglobals.md new file mode 100644 index 0000000..85995a4 --- /dev/null +++ b/docs/models/operations/createglobals.md @@ -0,0 +1,8 @@ +# CreateGlobals + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `x_log10_organization` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createrequest.md b/docs/models/operations/createrequest.md new file mode 100644 index 0000000..17eab1a --- /dev/null +++ b/docs/models/operations/createrequest.md @@ -0,0 +1,9 @@ +# CreateRequest + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | -------------------------------------------------------------- | +| `completion` | [components.Completion](../../models/components/completion.md) | :heavy_check_mark: | N/A | +| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createresponse.md b/docs/models/operations/createresponse.md new file mode 100644 index 0000000..0b03d23 --- /dev/null +++ b/docs/models/operations/createresponse.md @@ -0,0 +1,10 @@ +# CreateResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | ------------------------------------------------------------------------ | +| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A | +| `any` | *Optional[Any]* | :heavy_minus_sign: | Created | +| `completion` | [Optional[components.Completion]](../../models/components/completion.md) | 
:heavy_minus_sign: | Created | \ No newline at end of file diff --git a/docs/models/operations/createsessionglobals.md b/docs/models/operations/createsessionglobals.md new file mode 100644 index 0000000..7625804 --- /dev/null +++ b/docs/models/operations/createsessionglobals.md @@ -0,0 +1,8 @@ +# CreateSessionGlobals + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `x_log10_organization` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createsessionrequest.md b/docs/models/operations/createsessionrequest.md new file mode 100644 index 0000000..98deec1 --- /dev/null +++ b/docs/models/operations/createsessionrequest.md @@ -0,0 +1,8 @@ +# CreateSessionRequest + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/createsessionresponse.md b/docs/models/operations/createsessionresponse.md new file mode 100644 index 0000000..7aad7ac --- /dev/null +++ b/docs/models/operations/createsessionresponse.md @@ -0,0 +1,9 @@ +# CreateSessionResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------ | +| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A | +| `object` | [Optional[operations.CreateSessionResponseBody]](../../models/operations/createsessionresponsebody.md) | :heavy_minus_sign: | Created | \ No newline at end of file diff --git a/docs/models/operations/createsessionresponsebody.md b/docs/models/operations/createsessionresponsebody.md new file mode 100644 index 0000000..705a90c --- /dev/null +++ b/docs/models/operations/createsessionresponsebody.md @@ -0,0 +1,10 @@ +# CreateSessionResponseBody + +Created + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `session` | [Optional[components.Session]](../../models/components/session.md) | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/getfeedbacktaskrequest.md b/docs/models/operations/getfeedbacktaskrequest.md new file mode 100644 index 0000000..0300e8d --- /dev/null +++ b/docs/models/operations/getfeedbacktaskrequest.md @@ -0,0 +1,8 @@ +# GetFeedbackTaskRequest + + +## Fields + +| Field | Type | Required | Description | +| --------------------- | --------------------- | --------------------- | --------------------- | +| `task_id` | *str* | :heavy_check_mark: | The task id to fetch. 
| \ No newline at end of file diff --git a/docs/models/operations/getfeedbacktaskresponse.md b/docs/models/operations/getfeedbacktaskresponse.md new file mode 100644 index 0000000..03dc19f --- /dev/null +++ b/docs/models/operations/getfeedbacktaskresponse.md @@ -0,0 +1,9 @@ +# GetFeedbackTaskResponse + + +## Fields + +| Field | Type | Required | Description | +| ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | ------------------------------------------------------------------ | +| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A | +| `task` | [Optional[components.Task]](../../models/components/task.md) | :heavy_minus_sign: | OK | \ No newline at end of file diff --git a/docs/models/operations/getglobals.md b/docs/models/operations/getglobals.md new file mode 100644 index 0000000..aedaf05 --- /dev/null +++ b/docs/models/operations/getglobals.md @@ -0,0 +1,8 @@ +# GetGlobals + + +## Fields + +| Field | Type | Required | Description | +| ---------------------- | ---------------------- | ---------------------- | ---------------------- | +| `x_log10_organization` | *str* | :heavy_check_mark: | N/A | \ No newline at end of file diff --git a/docs/models/operations/getrequest.md b/docs/models/operations/getrequest.md new file mode 100644 index 0000000..edacbe6 --- /dev/null +++ b/docs/models/operations/getrequest.md @@ -0,0 +1,9 @@ +# GetRequest + + +## Fields + +| Field | Type | Required | Description | +| ------------------------- | ------------------------- | ------------------------- | ------------------------- | +| `feedback_id` | *str* | :heavy_check_mark: | The feedback id to fetch. | +| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A | \ No newline at end of file diff --git a/docs/models/operations/getresponse.md b/docs/models/operations/getresponse.md new file mode 100644 index 0000000..de39581 --- /dev/null +++ b/docs/models/operations/getresponse.md @@ -0,0 +1,9 @@ +# GetResponse + + +## Fields + +| Field | Type | Required | Description | +| -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | -------------------------------------------------------------------- | +| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A | +| `feedback` | [Optional[components.Feedback]](../../models/components/feedback.md) | :heavy_minus_sign: | OK | \ No newline at end of file diff --git a/docs/models/operations/jsonvalues.md b/docs/models/operations/jsonvalues.md new file mode 100644 index 0000000..ec0c8be --- /dev/null +++ b/docs/models/operations/jsonvalues.md @@ -0,0 +1,9 @@ +# JSONValues + +The values of the feedback. Must be valid JSON according to the task schema. 
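+
+For example (a hypothetical payload; the actual keys are dictated by whatever JSON schema the feedback task defines):
+
+```python
+# If the task schema declares an integer "rating" and a string "note",
+# a conforming json_values payload could be:
+json_values = {"rating": 4, "note": "Helpful, but verbose."}
+```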
+
+
+## Fields
+
+| Field       | Type        | Required    | Description |
+| ----------- | ----------- | ----------- | ----------- |
\ No newline at end of file
diff --git a/docs/models/operations/listfeedbacktasksresponse.md b/docs/models/operations/listfeedbacktasksresponse.md
new file mode 100644
index 0000000..9d6d26b
--- /dev/null
+++ b/docs/models/operations/listfeedbacktasksresponse.md
@@ -0,0 +1,9 @@
+# ListFeedbackTasksResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A |
+| `tasks` | List[[components.Task](../../models/components/task.md)] | :heavy_minus_sign: | OK |
\ No newline at end of file
diff --git a/docs/models/operations/listglobals.md b/docs/models/operations/listglobals.md
new file mode 100644
index 0000000..28bcc3e
--- /dev/null
+++ b/docs/models/operations/listglobals.md
@@ -0,0 +1,8 @@
+# ListGlobals
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/listrequest.md b/docs/models/operations/listrequest.md
new file mode 100644
index 0000000..2fbbad5
--- /dev/null
+++ b/docs/models/operations/listrequest.md
@@ -0,0 +1,9 @@
+# ListRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `request_body` | [Optional[operations.ListRequestBody]](../../models/operations/listrequestbody.md) | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/listrequestbody.md b/docs/models/operations/listrequestbody.md
new file mode 100644
index 0000000..66bef5e
--- /dev/null
+++ b/docs/models/operations/listrequestbody.md
@@ -0,0 +1,11 @@
+# ListRequestBody
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ------------------------------------------- | ------------------------------------------- | ------------------------------------------- | ------------------------------------------- |
+| `offset` | *Optional[int]* | :heavy_minus_sign: | The offset to start fetching feedback from. |
+| `limit` | *Optional[int]* | :heavy_minus_sign: | The number of feedback items to fetch. |
+| `completion_id` | *Optional[str]* | :heavy_minus_sign: | The completion id to fetch feedback for. |
+| `task_id` | *Optional[str]* | :heavy_minus_sign: | The task id to fetch feedback for. |
\ No newline at end of file
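+
+An offset/limit pagination sketch (an illustration only, assuming the `s.feedback.list` call shown in the Feedback SDK README below and the fields above):
+
+```python
+import log10
+from log10.models import operations
+
+s = log10.Log10(log10_token="", x_log10_organization='')
+
+# Page through feedback 50 items at a time; a short (or empty) page signals the end.
+offset, limit = 0, 50
+while True:
+    res = s.feedback.list(request_body=operations.ListRequestBody(offset=offset, limit=limit))
+    page = res.object.feedback if res.object is not None else None
+    if not page:
+        break
+    # ... process the page here ...
+    if len(page) < limit:
+        break
+    offset += limit
+```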
diff --git a/docs/models/operations/listresponse.md b/docs/models/operations/listresponse.md
new file mode 100644
index 0000000..d8bf0e0
--- /dev/null
+++ b/docs/models/operations/listresponse.md
@@ -0,0 +1,9 @@
+# ListResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A |
+| `object` | [Optional[operations.ListResponseBody]](../../models/operations/listresponsebody.md) | :heavy_minus_sign: | OK |
\ No newline at end of file
diff --git a/docs/models/operations/listresponsebody.md b/docs/models/operations/listresponsebody.md
new file mode 100644
index 0000000..fa0968c
--- /dev/null
+++ b/docs/models/operations/listresponsebody.md
@@ -0,0 +1,10 @@
+# ListResponseBody
+
+OK
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `feedback` | List[[components.Feedback](../../models/components/feedback.md)] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/listungradedglobals.md b/docs/models/operations/listungradedglobals.md
new file mode 100644
index 0000000..c2edfa5
--- /dev/null
+++ b/docs/models/operations/listungradedglobals.md
@@ -0,0 +1,8 @@
+# ListUngradedGlobals
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/listungradedrequest.md b/docs/models/operations/listungradedrequest.md
new file mode 100644
index 0000000..ff2260d
--- /dev/null
+++ b/docs/models/operations/listungradedrequest.md
@@ -0,0 +1,8 @@
+# ListUngradedRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/listungradedresponse.md b/docs/models/operations/listungradedresponse.md
new file mode 100644
index 0000000..31e389b
--- /dev/null
+++ b/docs/models/operations/listungradedresponse.md
@@ -0,0 +1,9 @@
+# ListUngradedResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A |
+| `object` | [Optional[operations.ListUngradedResponseBody]](../../models/operations/listungradedresponsebody.md) | :heavy_minus_sign: | OK |
\ No newline at end of file
diff --git a/docs/models/operations/listungradedresponsebody.md b/docs/models/operations/listungradedresponsebody.md
new file mode 100644
index 0000000..306388a
--- /dev/null
+++ b/docs/models/operations/listungradedresponsebody.md
@@ -0,0 +1,10 @@
+# ListUngradedResponseBody
+
+OK
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `completions` | List[[components.Completion](../../models/components/completion.md)] | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/one.md b/docs/models/operations/one.md
new file mode 100644
index 0000000..f81cec8
--- /dev/null
+++ b/docs/models/operations/one.md
@@ -0,0 +1,17 @@
+# One
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `task_id` | *str* | :heavy_check_mark: | The unique identifier for the task associated with this feedback. |
+| `json_values` | [operations.JSONValues](../../models/operations/jsonvalues.md) | :heavy_check_mark: | The values of the feedback. Must be valid JSON according to the task schema. |
+| `matched_completion_ids` | List[*str*] | :heavy_check_mark: | The matched completion ids associated with this feedback. |
+| `comment` | *str* | :heavy_check_mark: | The comment associated with this feedback. |
+| `completion_tags_selector` | List[*str*] | :heavy_check_mark: | The completion tags associated with this feedback. |
+| `id` | *Optional[str]* | :heavy_minus_sign: | The unique identifier for this feedback. |
+| `created_at_ms` | *Optional[float]* | :heavy_minus_sign: | The epoch time, in milliseconds, when this feedback was created. |
+| `completions_summary` | *Optional[str]* | :heavy_minus_sign: | N/A |
+| `allow_unmatched_feedback` | *Optional[bool]* | :heavy_minus_sign: | Whether to allow unmatched feedback. Defaults to false. |
+| `max_matched_completions` | *Optional[int]* | :heavy_minus_sign: | The maximum number of matched completions. Returns an error if exceeded. Defaults to 100. |
\ No newline at end of file
diff --git a/docs/models/operations/requestbodyjsonvalues.md b/docs/models/operations/requestbodyjsonvalues.md
new file mode 100644
index 0000000..89a5da3
--- /dev/null
+++ b/docs/models/operations/requestbodyjsonvalues.md
@@ -0,0 +1,9 @@
+# RequestBodyJSONValues
+
+The values of the feedback. Must be valid JSON according to the task schema.
+
+
+## Fields
+
+| Field       | Type        | Required    | Description |
+| ----------- | ----------- | ----------- | ----------- |
\ No newline at end of file
diff --git a/docs/models/operations/two.md b/docs/models/operations/two.md
new file mode 100644
index 0000000..3849fd8
--- /dev/null
+++ b/docs/models/operations/two.md
@@ -0,0 +1,15 @@
+# Two
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `task_id` | *str* | :heavy_check_mark: | The unique identifier for the task associated with this feedback. |
+| `json_values` | [operations.RequestBodyJSONValues](../../models/operations/requestbodyjsonvalues.md) | :heavy_check_mark: | The values of the feedback. Must be valid JSON according to the task schema. |
+| `matched_completion_ids` | List[*str*] | :heavy_check_mark: | The matched completion ids associated with this feedback. |
+| `comment` | *str* | :heavy_check_mark: | The comment associated with this feedback. |
+| `completion_ids` | List[*str*] | :heavy_check_mark: | The completion ids to associate with this feedback. |
+| `id` | *Optional[str]* | :heavy_minus_sign: | The unique identifier for this feedback. |
+| `created_at_ms` | *Optional[float]* | :heavy_minus_sign: | The epoch time, in milliseconds, when this feedback was created. |
+| `completions_summary` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/updateglobals.md b/docs/models/operations/updateglobals.md
new file mode 100644
index 0000000..1496dd5
--- /dev/null
+++ b/docs/models/operations/updateglobals.md
@@ -0,0 +1,8 @@
+# UpdateGlobals
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/updaterequest.md b/docs/models/operations/updaterequest.md
new file mode 100644
index 0000000..19c8181
--- /dev/null
+++ b/docs/models/operations/updaterequest.md
@@ -0,0 +1,10 @@
+# UpdateRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `completion_id` | *str* | :heavy_check_mark: | The completion id to update. |
+| `completion` | [components.Completion](../../models/components/completion.md) | :heavy_check_mark: | N/A |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/updateresponse.md b/docs/models/operations/updateresponse.md
new file mode 100644
index 0000000..1e78a89
--- /dev/null
+++ b/docs/models/operations/updateresponse.md
@@ -0,0 +1,9 @@
+# UpdateResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A |
+| `completion` | [Optional[components.Completion]](../../models/components/completion.md) | :heavy_minus_sign: | OK |
\ No newline at end of file
diff --git a/docs/models/operations/uploadglobals.md b/docs/models/operations/uploadglobals.md
new file mode 100644
index 0000000..37c8eac
--- /dev/null
+++ b/docs/models/operations/uploadglobals.md
@@ -0,0 +1,8 @@
+# UploadGlobals
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *str* | :heavy_check_mark: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/uploadrequest.md b/docs/models/operations/uploadrequest.md
new file mode 100644
index 0000000..0136602
--- /dev/null
+++ b/docs/models/operations/uploadrequest.md
@@ -0,0 +1,9 @@
+# UploadRequest
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `request_body` | [operations.UploadRequestBody](../../models/operations/uploadrequestbody.md) | :heavy_check_mark: | N/A |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
\ No newline at end of file
diff --git a/docs/models/operations/uploadrequestbody.md b/docs/models/operations/uploadrequestbody.md
new file mode 100644
index 0000000..0b8d304
--- /dev/null
+++ b/docs/models/operations/uploadrequestbody.md
@@ -0,0 +1,17 @@
+# UploadRequestBody
+
+
+## Supported Types
+
+### One
+
+```python
+upload_request_body: operations.One = ...  # assign an operations.One instance here
+```
+
+### Two
+
+```python
+upload_request_body: operations.Two = ...  # assign an operations.Two instance here
+```
+
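+
+A sketch of choosing between the two variants (an illustration only, assuming the `operations.One` and `operations.Two` models documented above; `One` selects completions by tags, while `Two` targets explicit completion ids):
+
+```python
+from log10.models import operations
+
+# Variant Two: attach feedback to known completion ids.
+upload_request_body = operations.Two(
+    task_id='',
+    json_values=operations.RequestBodyJSONValues(),
+    matched_completion_ids=[],
+    comment='Accurate and well formatted.',
+    completion_ids=['completion-id-1'],
+)
+```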
diff --git a/docs/models/operations/uploadresponse.md b/docs/models/operations/uploadresponse.md
new file mode 100644
index 0000000..9980fe8
--- /dev/null
+++ b/docs/models/operations/uploadresponse.md
@@ -0,0 +1,9 @@
+# UploadResponse
+
+
+## Fields
+
+| Field | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `http_meta` | [components.HTTPMetadata](../../models/components/httpmetadata.md) | :heavy_check_mark: | N/A |
+| `feedback` | [Optional[components.Feedback]](../../models/components/feedback.md) | :heavy_minus_sign: | OK |
\ No newline at end of file
diff --git a/docs/sdks/completions/README.md b/docs/sdks/completions/README.md
new file mode 100644
index 0000000..0b3bf39
--- /dev/null
+++ b/docs/sdks/completions/README.md
@@ -0,0 +1,170 @@
+# Completions
+(*completions*)
+
+## Overview
+
+Completions
+
+### Available Operations
+
+* [create](#create) - Create a completion
+* [update](#update) - Update a completion by id.
+* [list_ungraded](#list_ungraded) - List ungraded completions, i.e. completions that have not been associated with feedback but match the task selector.
+
+## create
+
+Create a completion
+
+### Example Usage
+
+```python
+import log10
+from log10.models import components
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.create(completion=components.Completion(
+    organization_id='',
+    request=components.CreateChatCompletionRequest(
+        messages=[
+            components.ChatCompletionRequestAssistantMessage(
+                role=components.ChatCompletionRequestAssistantMessageRole.ASSISTANT,
+            ),
+        ],
+        model=components.Two.GPT_4_TURBO,
+        n=1,
+        response_format=components.ResponseFormat(
+            type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+        ),
+        temperature=1,
+        top_p=1,
+        user='user-1234',
+    ),
+), x_log10_organization='')
+
+if res.any is not None:
+    # handle response
+    pass
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `completion` | [components.Completion](../../models/components/completion.md) | :heavy_check_mark: | N/A |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
+
+
+### Response
+
+**[operations.CreateResponse](../../models/operations/createresponse.md)**
+
+### Errors
+
+| Error Object | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4xx-5xx | */* |
+
+## update
+
+Update a completion by id.
+
+### Example Usage
+
+```python
+import log10
+from log10.models import components
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.update(completion_id='', completion=components.Completion(
+    organization_id='',
+    request=components.CreateChatCompletionRequest(
+        messages=[
+            components.ChatCompletionRequestFunctionMessage(
+                role=components.ChatCompletionRequestFunctionMessageRole.FUNCTION,
+                content='',
+                name='',
+            ),
+        ],
+        model=components.Two.GPT_4_TURBO,
+        n=1,
+        response_format=components.ResponseFormat(
+            type=components.CreateChatCompletionRequestType.JSON_OBJECT,
+        ),
+        temperature=1,
+        top_p=1,
+        user='user-1234',
+    ),
+), x_log10_organization='')
+
+if res.completion is not None:
+    # handle response
+    pass
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ----------- | ----------- | ----------- | ----------- |
+| `completion_id` | *str* | :heavy_check_mark: | The completion id to update. |
+| `completion` | [components.Completion](../../models/components/completion.md) | :heavy_check_mark: | N/A |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
+
+
+### Response
+
+**[operations.UpdateResponse](../../models/operations/updateresponse.md)**
+
+### Errors
+
+| Error Object | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4xx-5xx | */* |
+
+## list_ungraded
+
+List ungraded completions, i.e. completions that have not been associated with feedback but match the task selector.
+
+### Example Usage
+
+```python
+import log10
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.completions.list_ungraded(x_log10_organization='')
+
+if res.object is not None:
+    # handle response
+    pass
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
+
+
+### Response
+
+**[operations.ListUngradedResponse](../../models/operations/listungradedresponse.md)**
+
+### Errors
+
+| Error Object | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4xx-5xx | */* |
diff --git a/docs/sdks/feedback/README.md b/docs/sdks/feedback/README.md
new file mode 100644
index 0000000..50bf882
--- /dev/null
+++ b/docs/sdks/feedback/README.md
@@ -0,0 +1,144 @@
+# Feedback
+(*feedback*)
+
+## Overview
+
+Feedback
+
+### Available Operations
+
+* [get](#get) - Fetch feedback by id.
+* [list](#list) - List feedback
+* [upload](#upload) - Upload a piece of feedback
+
+## get
+
+Fetch feedback by id.
+
+### Example Usage
+
+```python
+import log10
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.feedback.get(feedback_id='', x_log10_organization='')
+
+if res.feedback is not None:
+    # handle response
+    pass
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ------------------------- | ------------------------- | ------------------------- | ------------------------- |
+| `feedback_id` | *str* | :heavy_check_mark: | The feedback id to fetch.
| +| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A | + + +### Response + +**[operations.GetResponse](../../models/operations/getresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4xx-5xx | */* | + +## list + +List feedback + +### Example Usage + +```python +import log10 +from log10.models import operations + +s = log10.Log10( + log10_token="", + x_log10_organization='', +) + + +res = s.feedback.list(x_log10_organization='', request_body=operations.ListRequestBody()) + +if res.object is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------- | +| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A | +| `request_body` | [Optional[operations.ListRequestBody]](../../models/operations/listrequestbody.md) | :heavy_minus_sign: | N/A | + + +### Response + +**[operations.ListResponse](../../models/operations/listresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4xx-5xx | */* | + +## upload + +Upload a piece of feedback + +### Example Usage + +```python +import log10 +from log10.models import operations + +s = log10.Log10( + log10_token="", + x_log10_organization='', +) + + +res = s.feedback.upload(request_body=operations.One( + task_id='', + json_values=operations.JSONValues(), + matched_completion_ids=[ + '', + ], + comment='The slim & simple Maple Gaming Keyboard from Dev Byte comes with a sleek body and 7- Color RGB LED Back-lighting for smart functionality', + completion_tags_selector=[ + '', + ], +), x_log10_organization='') + +if res.feedback is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | ---------------------------------------------------------------------------- | +| `request_body` | [operations.UploadRequestBody](../../models/operations/uploadrequestbody.md) | :heavy_check_mark: | N/A | +| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A | + + +### Response + +**[operations.UploadResponse](../../models/operations/uploadresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4xx-5xx | */* | diff --git a/docs/sdks/feedbacktasks/README.md b/docs/sdks/feedbacktasks/README.md new file mode 100644 index 0000000..a069dbc --- /dev/null +++ b/docs/sdks/feedbacktasks/README.md @@ -0,0 +1,129 @@ +# FeedbackTasks +(*feedback_tasks*) + +## Overview + +FeedbackTasks + +### Available Operations + +* [list](#list) - List feedback tasks. +* [create](#create) - Create a new task. +* [get](#get) - Retrieves feedback task `taskId`. + +## list + +List feedback tasks. 
+ +### Example Usage + +```python +import log10 + +s = log10.Log10( + log10_token="", + x_log10_organization='', +) + + +res = s.feedback_tasks.list() + +if res.tasks is not None: + # handle response + pass + +``` + + +### Response + +**[operations.ListFeedbackTasksResponse](../../models/operations/listfeedbacktasksresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4xx-5xx | */* | + +## create + +Create a new task. + +### Example Usage + +```python +import log10 +from log10.models import components + +s = log10.Log10( + log10_token="", + x_log10_organization='', +) + + +res = s.feedback_tasks.create(request=components.Task( + json_schema=components.JSONSchema(), + name='', + instruction='', + completion_tags_selector=components.CompletionTagsSelector(), +)) + +if res.task is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | -------------------------------------------------- | +| `request` | [components.Task](../../models/components/task.md) | :heavy_check_mark: | The request object to use for the request. | + + +### Response + +**[operations.CreateFeedbackTaskResponse](../../models/operations/createfeedbacktaskresponse.md)** +### Errors + +| Error Object | Status Code | Content Type | +| --------------- | --------------- | --------------- | +| errors.SDKError | 4xx-5xx | */* | + +## get + +Retrieves feedback task `taskId`. + +### Example Usage + +```python +import log10 + +s = log10.Log10( + log10_token="", + x_log10_organization='', +) + + +res = s.feedback_tasks.get(task_id='') + +if res.task is not None: + # handle response + pass + +``` + +### Parameters + +| Parameter | Type | Required | Description | +| --------------------- | --------------------- | --------------------- | --------------------- | +| `task_id` | *str* | :heavy_check_mark: | The task id to fetch. 
|
+
+
+### Response
+
+**[operations.GetFeedbackTaskResponse](../../models/operations/getfeedbacktaskresponse.md)**
+
+### Errors
+
+| Error Object | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4xx-5xx | */* |
diff --git a/docs/sdks/log10/README.md b/docs/sdks/log10/README.md
new file mode 100644
index 0000000..89307d2
--- /dev/null
+++ b/docs/sdks/log10/README.md
@@ -0,0 +1,9 @@
+# Log10 SDK
+
+
+## Overview
+
+Log10 Feedback API Spec
+
+### Available Operations
+
diff --git a/docs/sdks/sessions/README.md b/docs/sdks/sessions/README.md
new file mode 100644
index 0000000..27c6298
--- /dev/null
+++ b/docs/sdks/sessions/README.md
@@ -0,0 +1,49 @@
+# Sessions
+(*sessions*)
+
+## Overview
+
+Sessions
+
+### Available Operations
+
+* [create](#create) - Create a session
+
+## create
+
+Create a session
+
+### Example Usage
+
+```python
+import log10
+
+s = log10.Log10(
+    log10_token="",
+    x_log10_organization='',
+)
+
+
+res = s.sessions.create(x_log10_organization='')
+
+if res.object is not None:
+    # handle response
+    pass
+
+```
+
+### Parameters
+
+| Parameter | Type | Required | Description |
+| ---------------------- | ---------------------- | ---------------------- | ---------------------- |
+| `x_log10_organization` | *Optional[str]* | :heavy_minus_sign: | N/A |
+
+
+### Response
+
+**[operations.CreateSessionResponse](../../models/operations/createsessionresponse.md)**
+
+### Errors
+
+| Error Object | Status Code | Content Type |
+| --------------- | --------------- | --------------- |
+| errors.SDKError | 4xx-5xx | */* |
diff --git a/openai.yaml b/openai.yaml
new file mode 100644
index 0000000..b0437f6
--- /dev/null
+++ b/openai.yaml
@@ -0,0 +1,13360 @@
+openapi: 3.0.0
+info:
+  title: OpenAI API
+  description: The OpenAI REST API. Please see https://platform.openai.com/docs/api-reference for more details.
+  version: "2.0.0"
+  termsOfService: https://openai.com/policies/terms-of-use
+  contact:
+    name: OpenAI Support
+    url: https://help.openai.com/
+  license:
+    name: MIT
+    url: https://github.com/openai/openai-openapi/blob/master/LICENSE
+servers:
+  - url: https://api.openai.com/v1
+tags:
+  - name: Assistants
+    description: Build Assistants that can call models and use tools.
+  - name: Audio
+    description: Learn how to turn audio into text or text into audio.
+  - name: Chat
+    description: Given a list of messages comprising a conversation, the model will return a response.
+  - name: Completions
+    description: Given a prompt, the model will return one or more predicted completions, and can also return the probabilities of alternative tokens at each position.
+  - name: Embeddings
+    description: Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+  - name: Fine-tuning
+    description: Manage fine-tuning jobs to tailor a model to your specific training data.
+  - name: Batch
+    description: Create large batches of API requests to run asynchronously.
+  - name: Files
+    description: Files are used to upload documents that can be used with features like Assistants and Fine-tuning.
+  - name: Images
+    description: Given a prompt and/or an input image, the model will generate a new image.
+  - name: Models
+    description: List and describe the various models available in the API.
+  - name: Moderations
+    description: Given an input text, outputs if the model classifies it as potentially harmful.
+paths: + # Note: When adding an endpoint, make sure you also add it in the `groups` section, in the end of this file, + # under the appropriate group + /chat/completions: + post: + operationId: createChatCompletion + tags: + - Chat + summary: Creates a model response for the given chat conversation. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateChatCompletionResponse" + + x-oaiMeta: + name: Create chat completion + group: chat + returns: | + Returns a [chat completion](/docs/api-reference/chat/object) object, or a streamed sequence of [chat completion chunk](/docs/api-reference/chat/streaming) objects if the request is streamed. + path: create + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ] + ) + + print(completion.choices[0].message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "system", content: "You are a helpful assistant." }], + model: "VAR_model_id", + }); + + console.log(completion.choices[0]); + } + + main(); + response: &chat_completion_example | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nHello there, how may I assist you today?", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + - title: Image input + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-4-turbo", + "messages": [ + { + "role": "user", + "content": [ + { + "type": "text", + "text": "What'\''s in this image?" 
+ }, + { + "type": "image_url", + "image_url": { + "url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg" + } + } + ] + } + ], + "max_tokens": 300 + }' + python: | + from openai import OpenAI + + client = OpenAI() + + response = client.chat.completions.create( + model="gpt-4-turbo", + messages=[ + { + "role": "user", + "content": [ + {"type": "text", "text": "What's in this image?"}, + { + "type": "image_url", + "image_url": "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + ], + } + ], + max_tokens=300, + ) + + print(response.choices[0]) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.chat.completions.create({ + model: "gpt-4-turbo", + messages: [ + { + role: "user", + content: [ + { type: "text", text: "What's in this image?" }, + { + type: "image_url", + image_url: + "https://upload.wikimedia.org/wikipedia/commons/thumb/d/dd/Gfp-wisconsin-madison-the-nature-boardwalk.jpg/2560px-Gfp-wisconsin-madison-the-nature-boardwalk.jpg", + }, + ], + }, + ], + }); + console.log(response.choices[0]); + } + main(); + response: &chat_completion_image_example | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1677652288, + "model": "gpt-3.5-turbo-0125", + "system_fingerprint": "fp_44709d6fcb", + "choices": [{ + "index": 0, + "message": { + "role": "assistant", + "content": "\n\nThis image shows a wooden boardwalk extending through a lush green marshland.", + }, + "logprobs": null, + "finish_reason": "stop" + }], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 12, + "total_tokens": 21 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "system", + "content": "You are a helpful assistant." + }, + { + "role": "user", + "content": "Hello!" 
+ } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_model_id", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream=True + ) + + for chunk in completion: + print(chunk.choices[0].delta) + + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + model: "VAR_model_id", + messages: [ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Hello!"} + ], + stream: true, + }); + + for await (const chunk of completion) { + console.log(chunk.choices[0].delta.content); + } + } + + main(); + response: &chat_completion_chunk_example | + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"role":"assistant","content":""},"logprobs":null,"finish_reason":null}]} + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{"content":"Hello"},"logprobs":null,"finish_reason":null}]} + + .... + + {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1694268190,"model":"gpt-3.5-turbo-0125", "system_fingerprint": "fp_44709d6fcb", "choices":[{"index":0,"delta":{},"logprobs":null,"finish_reason":"stop"}]} + - title: Functions + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "gpt-4-turbo", + "messages": [ + { + "role": "user", + "content": "What'\''s the weather like in Boston today?" + } + ], + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "tool_choice": "auto" + }' + python: | + from openai import OpenAI + client = OpenAI() + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + messages = [{"role": "user", "content": "What's the weather like in Boston today?"}] + completion = client.chat.completions.create( + model="VAR_model_id", + messages=messages, + tools=tools, + tool_choice="auto" + ) + + print(completion) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messages = [{"role": "user", "content": "What's the weather like in Boston today?"}]; + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + const response = await openai.chat.completions.create({ + model: "gpt-4-turbo", + messages: messages, + tools: tools, + tool_choice: "auto", + }); + + console.log(response); + } + + main(); + response: &chat_completion_function_example | + { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1699896916, + "model": "gpt-3.5-turbo-0125", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": null, + "tool_calls": [ + { + "id": "call_abc123", + "type": "function", + "function": { + "name": "get_current_weather", + "arguments": "{\n\"location\": \"Boston, MA\"\n}" + } + } + ] + }, + "logprobs": null, + "finish_reason": "tool_calls" + } + ], + "usage": { + "prompt_tokens": 82, + "completion_tokens": 17, + "total_tokens": 99 + } + } + - title: Logprobs + request: + curl: | + curl https://api.openai.com/v1/chat/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "messages": [ + { + "role": "user", + "content": "Hello!" + } + ], + "logprobs": true, + "top_logprobs": 2 + }' + python: | + from openai import OpenAI + client = OpenAI() + + completion = client.chat.completions.create( + model="VAR_model_id", + messages=[ + {"role": "user", "content": "Hello!"} + ], + logprobs=True, + top_logprobs=2 + ) + + print(completion.choices[0].message) + print(completion.choices[0].logprobs) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.chat.completions.create({ + messages: [{ role: "user", content: "Hello!" }], + model: "VAR_model_id", + logprobs: true, + top_logprobs: 2, + }); + + console.log(completion.choices[0]); + } + + main(); + response: | + { + "id": "chatcmpl-123", + "object": "chat.completion", + "created": 1702685778, + "model": "gpt-3.5-turbo-0125", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "Hello! How can I assist you today?" 
+ }, + "logprobs": { + "content": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111], + "top_logprobs": [ + { + "token": "Hello", + "logprob": -0.31725305, + "bytes": [72, 101, 108, 108, 111] + }, + { + "token": "Hi", + "logprob": -1.3190403, + "bytes": [72, 105] + } + ] + }, + { + "token": "!", + "logprob": -0.02380986, + "bytes": [ + 33 + ], + "top_logprobs": [ + { + "token": "!", + "logprob": -0.02380986, + "bytes": [33] + }, + { + "token": " there", + "logprob": -3.787621, + "bytes": [32, 116, 104, 101, 114, 101] + } + ] + }, + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119], + "top_logprobs": [ + { + "token": " How", + "logprob": -0.000054669687, + "bytes": [32, 72, 111, 119] + }, + { + "token": "<|end|>", + "logprob": -10.953937, + "bytes": null + } + ] + }, + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110], + "top_logprobs": [ + { + "token": " can", + "logprob": -0.015801601, + "bytes": [32, 99, 97, 110] + }, + { + "token": " may", + "logprob": -4.161023, + "bytes": [32, 109, 97, 121] + } + ] + }, + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [ + 32, + 73 + ], + "top_logprobs": [ + { + "token": " I", + "logprob": -3.7697225e-6, + "bytes": [32, 73] + }, + { + "token": " assist", + "logprob": -13.596657, + "bytes": [32, 97, 115, 115, 105, 115, 116] + } + ] + }, + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116], + "top_logprobs": [ + { + "token": " assist", + "logprob": -0.04571125, + "bytes": [32, 97, 115, 115, 105, 115, 116] + }, + { + "token": " help", + "logprob": -3.1089056, + "bytes": [32, 104, 101, 108, 112] + } + ] + }, + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117], + "top_logprobs": [ + { + "token": " you", + "logprob": -5.4385737e-6, + "bytes": [32, 121, 111, 117] + }, + { + "token": " today", + "logprob": -12.807695, + "bytes": [32, 116, 111, 100, 97, 121] + } + ] + }, + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121], + "top_logprobs": [ + { + "token": " today", + "logprob": -0.0040071653, + "bytes": [32, 116, 111, 100, 97, 121] + }, + { + "token": "?", + "logprob": -5.5247097, + "bytes": [63] + } + ] + }, + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63], + "top_logprobs": [ + { + "token": "?", + "logprob": -0.0008108172, + "bytes": [63] + }, + { + "token": "?\n", + "logprob": -7.184561, + "bytes": [63, 10] + } + ] + } + ] + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 9, + "completion_tokens": 9, + "total_tokens": 18 + }, + "system_fingerprint": null + } + + /completions: + post: + operationId: createCompletion + tags: + - Completions + summary: Creates a completion for the provided prompt and parameters. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateCompletionResponse" + x-oaiMeta: + name: Create completion + group: completions + returns: | + Returns a [completion](/docs/api-reference/completions/object) object, or a sequence of completion objects if the request is streamed. 
+ legacy: true + examples: + - title: No streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0 + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.completions.create( + model="VAR_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0 + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const completion = await openai.completions.create({ + model: "VAR_model_id", + prompt: "Say this is a test.", + max_tokens: 7, + temperature: 0, + }); + + console.log(completion); + } + main(); + response: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "VAR_model_id", + "system_fingerprint": "fp_44709d6fcb", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/completions \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "VAR_model_id", + "prompt": "Say this is a test", + "max_tokens": 7, + "temperature": 0, + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + for chunk in client.completions.create( + model="VAR_model_id", + prompt="Say this is a test", + max_tokens=7, + temperature=0, + stream=True + ): + print(chunk.choices[0].text) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.completions.create({ + model: "VAR_model_id", + prompt: "Say this is a test.", + stream: true, + }); + + for await (const chunk of stream) { + console.log(chunk.choices[0].text) + } + } + main(); + response: | + { + "id": "cmpl-7iA7iJjj8V2zOkCGvWF2hAkDWBQZe", + "object": "text_completion", + "created": 1690759702, + "choices": [ + { + "text": "This", + "index": 0, + "logprobs": null, + "finish_reason": null + } + ], + "model": "gpt-3.5-turbo-instruct" + "system_fingerprint": "fp_44709d6fcb", + } + + /images/generations: + post: + operationId: createImage + tags: + - Images + summary: Creates an image given a prompt. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateImageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/generations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "model": "dall-e-3", + "prompt": "A cute baby sea otter", + "n": 1, + "size": "1024x1024" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.images.generate( + model="dall-e-3", + prompt="A cute baby sea otter", + n=1, + size="1024x1024" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.generate({ model: "dall-e-3", prompt: "A cute baby sea otter" }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /images/edits: + post: + operationId: createImageEdit + tags: + - Images + summary: Creates an edited or extended image given an original image and a prompt. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageEditRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image edit + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/images/edits \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F mask="@mask.png" \ + -F prompt="A cute baby sea otter wearing a beret" \ + -F n=2 \ + -F size="1024x1024" + python: | + from openai import OpenAI + client = OpenAI() + + client.images.edit( + image=open("otter.png", "rb"), + mask=open("mask.png", "rb"), + prompt="A cute baby sea otter wearing a beret", + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.edit({ + image: fs.createReadStream("otter.png"), + mask: fs.createReadStream("mask.png"), + prompt: "A cute baby sea otter wearing a beret", + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + /images/variations: + post: + operationId: createImageVariation + tags: + - Images + summary: Creates a variation of a given image. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateImageVariationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ImagesResponse" + x-oaiMeta: + name: Create image variation + group: images + returns: Returns a list of [image](/docs/api-reference/images/object) objects. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/images/variations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F image="@otter.png" \ + -F n=2 \ + -F size="1024x1024" + python: | + from openai import OpenAI + client = OpenAI() + + response = client.images.create_variation( + image=open("image_edit_original.png", "rb"), + n=2, + size="1024x1024" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const image = await openai.images.createVariation({ + image: fs.createReadStream("otter.png"), + }); + + console.log(image.data); + } + main(); + response: | + { + "created": 1589478378, + "data": [ + { + "url": "https://..." + }, + { + "url": "https://..." + } + ] + } + + /embeddings: + post: + operationId: createEmbedding + tags: + - Embeddings + summary: Creates an embedding vector representing the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateEmbeddingResponse" + x-oaiMeta: + name: Create embeddings + group: embeddings + returns: A list of [embedding](/docs/api-reference/embeddings/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/embeddings \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "input": "The food was delicious and the waiter...", + "model": "text-embedding-ada-002", + "encoding_format": "float" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.embeddings.create( + model="text-embedding-ada-002", + input="The food was delicious and the waiter...", + encoding_format="float" + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const embedding = await openai.embeddings.create({ + model: "text-embedding-ada-002", + input: "The quick brown fox jumped over the lazy dog", + encoding_format: "float", + }); + + console.log(embedding); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + ], + "model": "text-embedding-ada-002", + "usage": { + "prompt_tokens": 8, + "total_tokens": 8 + } + } + + /audio/speech: + post: + operationId: createSpeech + tags: + - Audio + summary: Generates audio from the input text. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateSpeechRequest" + responses: + "200": + description: OK + headers: + Transfer-Encoding: + schema: + type: string + description: chunked + content: + application/octet-stream: + schema: + type: string + format: binary + x-oaiMeta: + name: Create speech + group: audio + returns: The audio file content. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/speech \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -d '{ + "model": "tts-1", + "input": "The quick brown fox jumped over the lazy dog.", + "voice": "alloy" + }' \ + --output speech.mp3 + python: | + from pathlib import Path + import openai + + speech_file_path = Path(__file__).parent / "speech.mp3" + response = openai.audio.speech.create( + model="tts-1", + voice="alloy", + input="The quick brown fox jumped over the lazy dog." 
+ ) + response.stream_to_file(speech_file_path) + node: | + import fs from "fs"; + import path from "path"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const speechFile = path.resolve("./speech.mp3"); + + async function main() { + const mp3 = await openai.audio.speech.create({ + model: "tts-1", + voice: "alloy", + input: "Today is a wonderful day to build something people love!", + }); + console.log(speechFile); + const buffer = Buffer.from(await mp3.arrayBuffer()); + await fs.promises.writeFile(speechFile, buffer); + } + main(); + /audio/transcriptions: + post: + operationId: createTranscription + tags: + - Audio + summary: Transcribes audio into the input language. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranscriptionRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CreateTranscriptionResponseJson" + - $ref: "#/components/schemas/CreateTranscriptionResponseVerboseJson" + x-oaiMeta: + name: Create transcription + group: audio + returns: The [transcription object](/docs/api-reference/audio/json-object) or a [verbose transcription object](/docs/api-reference/audio/verbose-json-object). + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + }); + + console.log(transcription.text); + } + main(); + response: &basic_transcription_response_example | + { + "text": "Imagine the wildest idea that you've ever had, and you're curious about how it might scale to something that's a 100, a 1,000 times bigger. This is a place where you can get to do that." + } + - title: Word timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=word" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["word"] + ) + + print(transcript.words) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["word"] + }); + + console.log(transcription.text); + } + main(); + response: | + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. 
People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "words": [ + { + "word": "The", + "start": 0.0, + "end": 0.23999999463558197 + }, + ... + { + "word": "volleyball", + "start": 7.400000095367432, + "end": 7.900000095367432 + } + ] + } + - title: Segment timestamps + request: + curl: | + curl https://api.openai.com/v1/audio/transcriptions \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/audio.mp3" \ + -F "timestamp_granularities[]=segment" \ + -F model="whisper-1" \ + -F response_format="verbose_json" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.transcriptions.create( + file=audio_file, + model="whisper-1", + response_format="verbose_json", + timestamp_granularities=["segment"] + ) + + print(transcript.words) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const transcription = await openai.audio.transcriptions.create({ + file: fs.createReadStream("audio.mp3"), + model: "whisper-1", + response_format: "verbose_json", + timestamp_granularities: ["segment"] + }); + + console.log(transcription.text); + } + main(); + response: &verbose_transcription_response_example | + { + "task": "transcribe", + "language": "english", + "duration": 8.470000267028809, + "text": "The beach was a popular spot on a hot summer day. People were swimming in the ocean, building sandcastles, and playing beach volleyball.", + "segments": [ + { + "id": 0, + "seek": 0, + "start": 0.0, + "end": 3.319999933242798, + "text": " The beach was a popular spot on a hot summer day.", + "tokens": [ + 50364, 440, 7534, 390, 257, 3743, 4008, 322, 257, 2368, 4266, 786, 13, 50530 + ], + "temperature": 0.0, + "avg_logprob": -0.2860786020755768, + "compression_ratio": 1.2363636493682861, + "no_speech_prob": 0.00985979475080967 + }, + ... + ] + } + /audio/translations: + post: + operationId: createTranslation + tags: + - Audio + summary: Translates audio into English. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateTranslationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + oneOf: + - $ref: "#/components/schemas/CreateTranslationResponseJson" + - $ref: "#/components/schemas/CreateTranslationResponseVerboseJson" + x-oaiMeta: + name: Create translation + group: audio + returns: The translated text. + examples: + request: + curl: | + curl https://api.openai.com/v1/audio/translations \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: multipart/form-data" \ + -F file="@/path/to/file/german.m4a" \ + -F model="whisper-1" + python: | + from openai import OpenAI + client = OpenAI() + + audio_file = open("speech.mp3", "rb") + transcript = client.audio.translations.create( + model="whisper-1", + file=audio_file + ) + node: | + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const translation = await openai.audio.translations.create({ + file: fs.createReadStream("speech.mp3"), + model: "whisper-1", + }); + + console.log(translation.text); + } + main(); + response: | + { + "text": "Hello, my name is Wolfgang and I come from Germany. Where are you heading today?" 
+ } + + /files: + get: + operationId: listFiles + tags: + - Files + summary: Returns a list of files that belong to the user's organization. + parameters: + - in: query + name: purpose + required: false + schema: + type: string + description: Only return files with the given purpose. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFilesResponse" + x-oaiMeta: + name: List files + group: files + returns: A list of [File](/docs/api-reference/files/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.files.list(); + + for await (const file of list) { + console.log(file); + } + } + + main(); + response: | + { + "data": [ + { + "id": "file-abc123", + "object": "file", + "bytes": 175, + "created_at": 1613677385, + "filename": "salesOverview.pdf", + "purpose": "assistants", + }, + { + "id": "file-abc123", + "object": "file", + "bytes": 140, + "created_at": 1613779121, + "filename": "puppy.jsonl", + "purpose": "fine-tune", + } + ], + "object": "list" + } + post: + operationId: createFile + tags: + - Files + summary: | + Upload a file that can be used across various endpoints. The size of all the files uploaded by one organization can be up to 100 GB. + + The size of individual files can be a maximum of 512 MB or 2 million tokens for Assistants. See the [Assistants Tools guide](/docs/assistants/tools) to learn more about the types of files supported. The Fine-tuning API only supports `.jsonl` files. + + Please [contact us](https://help.openai.com/) if you need to increase these storage limits. + requestBody: + required: true + content: + multipart/form-data: + schema: + $ref: "#/components/schemas/CreateFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Upload file + group: files + returns: The uploaded [File](/docs/api-reference/files/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -F purpose="fine-tune" \ + -F file="@mydata.jsonl" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.create( + file=open("mydata.jsonl", "rb"), + purpose="fine-tune" + ) + node.js: |- + import fs from "fs"; + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.create({ + file: fs.createReadStream("mydata.jsonl"), + purpose: "fine-tune", + }); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + } + /files/{file_id}: + delete: + operationId: deleteFile + tags: + - Files + summary: Delete a file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteFileResponse" + x-oaiMeta: + name: Delete file + group: files + returns: Deletion status. 
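Because the upload endpoint above only accepts `.jsonl` files for fine-tuning and enforces per-file size limits, it can save a round trip to validate locally before calling it. A rough sketch, assuming the 512 MB per-file figure quoted in the summary and a placeholder filename (the 2 million token Assistants limit is not checked here):

import json
import os

from openai import OpenAI

MAX_BYTES = 512 * 1024 * 1024  # per-file limit quoted in the upload summary

def validate_jsonl(path: str) -> None:
    # Cheap local checks before uploading a fine-tuning file.
    if os.path.getsize(path) > MAX_BYTES:
        raise ValueError("file exceeds the 512 MB per-file limit")
    with open(path, "r", encoding="utf-8") as f:
        for lineno, line in enumerate(f, start=1):
            if not line.strip():
                continue
            try:
                json.loads(line)
            except json.JSONDecodeError as exc:
                raise ValueError(f"line {lineno} is not valid JSON") from exc

client = OpenAI()
validate_jsonl("mydata.jsonl")  # placeholder path
uploaded = client.files.create(file=open("mydata.jsonl", "rb"), purpose="fine-tune")
print(uploaded.id)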
+ examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.delete("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.del("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "deleted": true + } + get: + operationId: retrieveFile + tags: + - Files + summary: Returns information about a specific file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/OpenAIFile" + x-oaiMeta: + name: Retrieve file + group: files + returns: The [File](/docs/api-reference/files/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.files.retrieve("file-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.retrieve("file-abc123"); + + console.log(file); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "mydata.jsonl", + "purpose": "fine-tune", + } + /files/{file_id}/content: + get: + operationId: downloadFile + tags: + - Files + summary: Returns the contents of the specified file. + parameters: + - in: path + name: file_id + required: true + schema: + type: string + description: The ID of the file to use for this request. + responses: + "200": + description: OK + content: + application/json: + schema: + type: string + x-oaiMeta: + name: Retrieve file content + group: files + returns: The file content. + examples: + request: + curl: | + curl https://api.openai.com/v1/files/file-abc123/content \ + -H "Authorization: Bearer $OPENAI_API_KEY" > file.jsonl + python: | + from openai import OpenAI + client = OpenAI() + + content = client.files.content("file-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const file = await openai.files.content("file-abc123"); + + console.log(file); + } + + main(); + + /fine_tuning/jobs: + post: + operationId: createFineTuningJob + tags: + - Fine-tuning + summary: | + Creates a fine-tuning job which begins the process of creating a new model from a given dataset. + + Response includes details of the enqueued job including job status and the name of the fine-tuned models once complete. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateFineTuningJobRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Create fine-tuning job + group: fine-tuning + returns: A [fine-tuning.job](/docs/api-reference/fine-tuning/object) object. 
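Because the create endpoint above only enqueues the job, a typical workflow is to poll its status until it reaches a terminal state. A minimal sketch using the SDK calls shown in this spec; the training file ID is a placeholder, and the set of terminal statuses is an assumption based on the statuses that appear in the example responses:

import time

from openai import OpenAI

client = OpenAI()

job = client.fine_tuning.jobs.create(
    training_file="file-abc123",  # placeholder file ID
    model="gpt-3.5-turbo",
)

TERMINAL = {"succeeded", "failed", "cancelled"}  # assumed terminal statuses
while job.status not in TERMINAL:
    time.sleep(30)
    job = client.fine_tuning.jobs.retrieve(job.id)
    print("status:", job.status)

print("fine-tuned model:", job.fine_tuned_model)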
+ examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-BK7bzQj3FfZFXr7DbL6xJwfo", + "model": "gpt-3.5-turbo" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-3.5-turbo" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + } + - title: Epochs + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "model": "gpt-3.5-turbo", + "hyperparameters": { + "n_epochs": 2 + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + model="gpt-3.5-turbo", + hyperparameters={ + "n_epochs":2 + } + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + model: "gpt-3.5-turbo", + hyperparameters: { n_epochs: 2 } + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": {"n_epochs": 2}, + } + - title: Validation file + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-3.5-turbo" + }' + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.create( + training_file="file-abc123", + validation_file="file-def456", + model="gpt-3.5-turbo" + ) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.create({ + training_file: "file-abc123", + validation_file: "file-abc123" + }); + + console.log(fineTune); + } + + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + } + - title: W&B Integration + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "training_file": "file-abc123", + "validation_file": "file-abc123", + "model": "gpt-3.5-turbo", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": 
"my-wandb-project", + "name": "ft-run-display-name" + "tags": [ + "first-experiment", "v2" + ] + } + } + ] + }' + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0125", + "created_at": 1614807352, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "status": "queued", + "validation_file": "file-abc123", + "training_file": "file-abc123", + "integrations": [ + { + "type": "wandb", + "wandb": { + "project": "my-wandb-project", + "entity": None, + "run_id": "ftjob-abc123" + } + } + ] + } + get: + operationId: listPaginatedFineTuningJobs + tags: + - Fine-tuning + summary: | + List your organization's fine-tuning jobs + parameters: + - name: after + in: query + description: Identifier for the last job from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of fine-tuning jobs to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListPaginatedFineTuningJobsResponse" + x-oaiMeta: + name: List fine-tuning jobs + group: fine-tuning + returns: A list of paginated [fine-tuning job](/docs/api-reference/fine-tuning/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs?limit=2 \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTuning.jobs.list(); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-TjX0lMfOniCZX64t9PUQT5hn", + "created_at": 1689813489, + "level": "warn", + "message": "Fine tuning process stopping due to job cancellation", + "data": null, + "type": "message" + }, + { ... }, + { ... } + ], "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}: + get: + operationId: retrieveFineTuningJob + tags: + - Fine-tuning + summary: | + Get info about a fine-tuning job. + + [Learn more about fine-tuning](/docs/guides/fine-tuning) + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Retrieve fine-tuning job + group: fine-tuning + returns: The [fine-tuning](/docs/api-reference/fine-tuning/object) object with the given ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs/ft-AF1WoRqd3aJAHsqc9NY7iL8F \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.retrieve("ftjob-abc123") + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.retrieve("ftjob-abc123"); + + console.log(fineTune); + } + + main(); + response: &fine_tuning_example | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "davinci-002", + "created_at": 1692661014, + "finished_at": 1692661190, + "fine_tuned_model": "ft:davinci-002:my-org:custom_suffix:7q8mpxmy", + "organization_id": "org-123", + "result_files": [ + "file-abc123" + ], + "status": "succeeded", + "validation_file": null, + "training_file": "file-abc123", + "hyperparameters": { + "n_epochs": 4, + "batch_size": 1, + "learning_rate_multiplier": 1.0 + }, + "trained_tokens": 5768, + "integrations": [], + "seed": 0, + "estimated_finish": 0 + } + /fine_tuning/jobs/{fine_tuning_job_id}/events: + get: + operationId: listFineTuningEvents + tags: + - Fine-tuning + summary: | + Get status updates for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get events for. + - name: after + in: query + description: Identifier for the last event from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of events to retrieve. + required: false + schema: + type: integer + default: 20 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobEventsResponse" + x-oaiMeta: + name: List fine-tuning events + group: fine-tuning + returns: A list of fine-tuning event objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/events \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.list_events( + fine_tuning_job_id="ftjob-abc123", + limit=2 + ) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.fineTuning.list_events(id="ftjob-abc123", limit=2); + + for await (const fineTune of list) { + console.log(fineTune); + } + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "object": "fine_tuning.job.event", + "id": "ft-event-ddTJfwuMVpfLXseO0Am0Gqjm", + "created_at": 1692407401, + "level": "info", + "message": "Fine tuning job successfully completed", + "data": null, + "type": "message" + }, + { + "object": "fine_tuning.job.event", + "id": "ft-event-tyiGuB72evQncpH87xe505Sv", + "created_at": 1692407400, + "level": "info", + "message": "New fine-tuned model created: ft:gpt-3.5-turbo:openai::7p4lURel", + "data": null, + "type": "message" + } + ], + "has_more": true + } + /fine_tuning/jobs/{fine_tuning_job_id}/cancel: + post: + operationId: cancelFineTuningJob + tags: + - Fine-tuning + summary: | + Immediately cancel a fine-tune job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to cancel. 
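The job and event list endpoints above are paginated with `after`/`limit` query parameters. A sketch of walking the full job list, assuming the Python SDK exposes those parameters as keyword arguments and that the `after` cursor is the last job ID of the previous page, as described for the endpoint:

from openai import OpenAI

client = OpenAI()

after = None
while True:
    if after is None:
        page = client.fine_tuning.jobs.list(limit=20)
    else:
        page = client.fine_tuning.jobs.list(limit=20, after=after)
    for job in page.data:
        print(job.id, job.status)
    if not page.has_more:
        break
    after = page.data[-1].id  # cursor: last object ID from the previous page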
+ responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/FineTuningJob" + x-oaiMeta: + name: Cancel fine-tuning + group: fine-tuning + returns: The cancelled [fine-tuning](/docs/api-reference/fine-tuning/object) object. + examples: + request: + curl: | + curl -X POST https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.fine_tuning.jobs.cancel("ftjob-abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const fineTune = await openai.fineTuning.jobs.cancel("ftjob-abc123"); + + console.log(fineTune); + } + main(); + response: | + { + "object": "fine_tuning.job", + "id": "ftjob-abc123", + "model": "gpt-3.5-turbo-0125", + "created_at": 1689376978, + "fine_tuned_model": null, + "organization_id": "org-123", + "result_files": [], + "hyperparameters": { + "n_epochs": "auto" + }, + "status": "cancelled", + "validation_file": "file-abc123", + "training_file": "file-abc123" + } + /fine_tuning/jobs/{fine_tuning_job_id}/checkpoints: + get: + operationId: listFineTuningJobCheckpoints + tags: + - Fine-tuning + summary: | + List checkpoints for a fine-tuning job. + parameters: + - in: path + name: fine_tuning_job_id + required: true + schema: + type: string + example: ft-AF1WoRqd3aJAHsqc9NY7iL8F + description: | + The ID of the fine-tuning job to get checkpoints for. + - name: after + in: query + description: Identifier for the last checkpoint ID from the previous pagination request. + required: false + schema: + type: string + - name: limit + in: query + description: Number of checkpoints to retrieve. + required: false + schema: + type: integer + default: 10 + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListFineTuningJobCheckpointsResponse" + x-oaiMeta: + name: List fine-tuning checkpoints + group: fine-tuning + returns: A list of fine-tuning [checkpoint objects](/docs/api-reference/fine-tuning/checkpoint-object) for a fine-tuning job. + examples: + request: + curl: | + curl https://api.openai.com/v1/fine_tuning/jobs/ftjob-abc123/checkpoints \ + -H "Authorization: Bearer $OPENAI_API_KEY" + response: | + { + "object": "list" + "data": [ + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "created_at": 1519129973, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:96olL566:ckpt-step-2000", + "metrics": { + "full_valid_loss": 0.134, + "full_valid_mean_token_accuracy": 0.874 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 2000, + }, + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "created_at": 1519129833, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom-suffix:7q8mpxmy:ckpt-step-1000", + "metrics": { + "full_valid_loss": 0.167, + "full_valid_mean_token_accuracy": 0.781 + }, + "fine_tuning_job_id": "ftjob-abc123", + "step_number": 1000, + }, + ], + "first_id": "ftckpt_zc4Q7MP6XxulcVzj4MZdwsAB", + "last_id": "ftckpt_enQCFmOTGj3syEpYVhBRLTSy", + "has_more": true + } + + /models: + get: + operationId: listModels + tags: + - Models + summary: Lists the currently available models, and provides basic information about each one such as the owner and availability. 
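The checkpoint list above includes per-checkpoint validation metrics, which makes it easy to pick the strongest intermediate model once a job finishes. A sketch using a plain REST call against the documented path; the job ID is a placeholder, and it assumes `full_valid_loss` is present under `metrics`, as in the example response:

import os

import requests

job_id = "ftjob-abc123"  # placeholder
resp = requests.get(
    f"https://api.openai.com/v1/fine_tuning/jobs/{job_id}/checkpoints",
    headers={"Authorization": f"Bearer {os.environ['OPENAI_API_KEY']}"},
    timeout=30,
)
resp.raise_for_status()
checkpoints = resp.json()["data"]

# Lower full validation loss is better.
best = min(checkpoints, key=lambda c: c["metrics"]["full_valid_loss"])
print(best["fine_tuned_model_checkpoint"], best["metrics"]["full_valid_loss"])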
+ responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListModelsResponse" + x-oaiMeta: + name: List models + group: models + returns: A list of [model](/docs/api-reference/models/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/models \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.list() + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const list = await openai.models.list(); + + for await (const model of list) { + console.log(model); + } + } + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "model-id-0", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner" + }, + { + "id": "model-id-1", + "object": "model", + "created": 1686935002, + "owned_by": "organization-owner", + }, + { + "id": "model-id-2", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + }, + ], + "object": "list" + } + /models/{model}: + get: + operationId: retrieveModel + tags: + - Models + summary: Retrieves a model instance, providing basic information about the model such as the owner and permissioning. + parameters: + - in: path + name: model + required: true + schema: + type: string + # ideally this will be an actual ID, so this will always work from browser + example: gpt-3.5-turbo + description: The ID of the model to use for this request + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Model" + x-oaiMeta: + name: Retrieve model + group: models + returns: The [model](/docs/api-reference/models/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/models/VAR_model_id \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.retrieve("VAR_model_id") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.retrieve("VAR_model_id"); + + console.log(model); + } + + main(); + response: &retrieve_model_response | + { + "id": "VAR_model_id", + "object": "model", + "created": 1686935002, + "owned_by": "openai" + } + delete: + operationId: deleteModel + tags: + - Models + summary: Delete a fine-tuned model. You must have the Owner role in your organization to delete a model. + parameters: + - in: path + name: model + required: true + schema: + type: string + example: ft:gpt-3.5-turbo:acemeco:suffix:abc123 + description: The model to delete + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteModelResponse" + x-oaiMeta: + name: Delete a fine-tuned model + group: models + returns: Deletion status. 
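A small usage note for the model endpoints above: listing first and then retrieving avoids a 404 when a model ID (for example, a fine-tuned model that has since been deleted) is no longer visible to the key. A sketch with a placeholder model ID:

from openai import OpenAI

client = OpenAI()

available = {m.id for m in client.models.list().data}
model_id = "gpt-3.5-turbo"  # placeholder
if model_id in available:
    print(client.models.retrieve(model_id))
else:
    print(f"{model_id} is not available to this API key")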
+ examples: + request: + curl: | + curl https://api.openai.com/v1/models/ft:gpt-3.5-turbo:acemeco:suffix:abc123 \ + -X DELETE \ + -H "Authorization: Bearer $OPENAI_API_KEY" + python: | + from openai import OpenAI + client = OpenAI() + + client.models.delete("ft:gpt-3.5-turbo:acemeco:suffix:abc123") + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const model = await openai.models.del("ft:gpt-3.5-turbo:acemeco:suffix:abc123"); + + console.log(model); + } + main(); + response: | + { + "id": "ft:gpt-3.5-turbo:acemeco:suffix:abc123", + "object": "model", + "deleted": true + } + + /moderations: + post: + operationId: createModeration + tags: + - Moderations + summary: Classifies if text is potentially harmful. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/CreateModerationResponse" + x-oaiMeta: + name: Create moderation + group: moderations + returns: A [moderation](/docs/api-reference/moderations/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/moderations \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -d '{ + "input": "I want to kill them." + }' + python: | + from openai import OpenAI + client = OpenAI() + + moderation = client.moderations.create(input="I want to kill them.") + print(moderation) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const moderation = await openai.moderations.create({ input: "I want to kill them." }); + + console.log(moderation); + } + main(); + response: &moderation_example | + { + "id": "modr-XXXXX", + "model": "text-moderation-005", + "results": [ + { + "flagged": true, + "categories": { + "sexual": false, + "hate": false, + "harassment": false, + "self-harm": false, + "sexual/minors": false, + "hate/threatening": false, + "violence/graphic": false, + "self-harm/intent": false, + "self-harm/instructions": false, + "harassment/threatening": true, + "violence": true, + }, + "category_scores": { + "sexual": 1.2282071e-06, + "hate": 0.010696256, + "harassment": 0.29842457, + "self-harm": 1.5236925e-08, + "sexual/minors": 5.7246268e-08, + "hate/threatening": 0.0060676364, + "violence/graphic": 4.435014e-06, + "self-harm/intent": 8.098441e-10, + "self-harm/instructions": 2.8498655e-11, + "harassment/threatening": 0.63055265, + "violence": 0.99011886, + } + } + ] + } + + /assistants: + get: + operationId: listAssistants + tags: + - Assistants + summary: Returns a list of assistants. + parameters: + - name: limit + in: query + description: &pagination_limit_param_description | + A limit on the number of objects to be returned. Limit can range between 1 and 100, and the default is 20. + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: &pagination_order_param_description | + Sort order by the `created_at` timestamp of the objects. `asc` for ascending order and `desc` for descending order. + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: &pagination_after_param_description | + A cursor for use in pagination. `after` is an object ID that defines your place in the list. 
For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include after=obj_foo in order to fetch the next page of the list. + schema: + type: string + - name: before + in: query + description: &pagination_before_param_description | + A cursor for use in pagination. `before` is an object ID that defines your place in the list. For instance, if you make a list request and receive 100 objects, ending with obj_foo, your subsequent call can include before=obj_foo in order to fetch the previous page of the list. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListAssistantsResponse" + x-oaiMeta: + name: List assistants + group: assistants + beta: true + returns: A list of [assistant](/docs/api-reference/assistants/object) objects. + examples: + request: + curl: | + curl "https://api.openai.com/v1/assistants?order=desc&limit=20" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_assistants = client.beta.assistants.list( + order="desc", + limit="20", + ) + print(my_assistants.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistants = await openai.beta.assistants.list({ + order: "desc", + limit: "20", + }); + + console.log(myAssistants.data); + } + + main(); + response: &list_assistants_example | + { + "object": "list", + "data": [ + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698982736, + "name": "Coding Tutor", + "description": null, + "model": "gpt-4-turbo", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc456", + "object": "assistant", + "created_at": 1698982718, + "name": "My Assistant", + "description": null, + "model": "gpt-4-turbo", + "instructions": "You are a helpful assistant designed to make me better at coding!", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + }, + { + "id": "asst_abc789", + "object": "assistant", + "created_at": 1698982643, + "name": null, + "description": null, + "model": "gpt-4-turbo", + "instructions": null, + "tools": [], + "tool_resources": {}, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + ], + "first_id": "asst_abc123", + "last_id": "asst_abc789", + "has_more": false + } + post: + operationId: createAssistant + tags: + - Assistants + summary: Create an assistant with a model and instructions. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Create assistant + group: assistants + beta: true + returns: An [assistant](/docs/api-reference/assistants/object) object. + examples: + - title: Code Interpreter + request: + curl: | + curl "https://api.openai.com/v1/assistants" \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are a personal math tutor. 
When asked a question, write and run Python code to answer the question.", + "name": "Math Tutor", + "tools": [{"type": "code_interpreter"}], + "model": "gpt-4-turbo" + }' + + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.create( + instructions="You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name="Math Tutor", + tools=[{"type": "code_interpreter"}], + model="gpt-4-turbo", + ) + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + name: "Math Tutor", + tools: [{ type: "code_interpreter" }], + model: "gpt-4-turbo", + }); + + console.log(myAssistant); + } + + main(); + response: &create_assistants_example | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1698984975, + "name": "Math Tutor", + "description": null, + "model": "gpt-4-turbo", + "instructions": "You are a personal math tutor. When asked a question, write and run Python code to answer the question.", + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + - title: Files + request: + curl: | + curl https://api.openai.com/v1/assistants \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [{"type": "file_search"}], + "tool_resources": {"file_search": {"vector_store_ids": ["vs_123"]}}, + "model": "gpt-4-turbo" + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.create( + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies.", + name="HR Helper", + tools=[{"type": "file_search"}], + tool_resources={"file_search": {"vector_store_ids": ["vs_123"]}}, + model="gpt-4-turbo" + ) + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.create({ + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies.", + name: "HR Helper", + tools: [{ type: "file_search" }], + tool_resources: { + file_search: { + vector_store_ids: ["vs_123"] + } + }, + model: "gpt-4-turbo" + }); + + console.log(myAssistant); + } + + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009403, + "name": "HR Helper", + "description": null, + "model": "gpt-4-turbo", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": ["vs_123"] + } + }, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + + /assistants/{assistant_id}: + get: + operationId: getAssistant + tags: + - Assistants + summary: Retrieves an assistant. 
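The assistant list endpoint above supports `limit`, `order`, `after`, and `before` cursors. A sketch of walking every assistant in the organization, assuming the Python SDK accepts those parameters as keyword arguments and using the last object ID of each page as the `after` cursor, per the parameter description:

from openai import OpenAI

client = OpenAI()

after = None
while True:
    if after is None:
        page = client.beta.assistants.list(limit=20, order="desc")
    else:
        page = client.beta.assistants.list(limit=20, order="desc", after=after)
    for assistant in page.data:
        print(assistant.id, assistant.name)
    if not page.has_more:
        break
    after = page.data[-1].id  # cursor: last assistant ID from this page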
+ parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Retrieve assistant + group: assistants + beta: true + returns: The [assistant](/docs/api-reference/assistants/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_assistant = client.beta.assistants.retrieve("asst_abc123") + print(my_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myAssistant = await openai.beta.assistants.retrieve( + "asst_abc123" + ); + + console.log(myAssistant); + } + + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4-turbo", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies.", + "tools": [ + { + "type": "file_search" + } + ], + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + post: + operationId: modifyAssistant + tags: + - Assistants + summary: Modifies an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyAssistantRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/AssistantObject" + x-oaiMeta: + name: Modify assistant + group: assistants + beta: true + returns: The modified [assistant](/docs/api-reference/assistants/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + "tools": [{"type": "file_search"}], + "model": "gpt-4-turbo" + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_updated_assistant = client.beta.assistants.update( + "asst_abc123", + instructions="You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + name="HR Helper", + tools=[{"type": "file_search"}], + model="gpt-4-turbo" + ) + + print(my_updated_assistant) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myUpdatedAssistant = await openai.beta.assistants.update( + "asst_abc123", + { + instructions: + "You are an HR bot, and you have access to files to answer employee questions about company policies. 
Always response with info from either of the files.", + name: "HR Helper", + tools: [{ type: "file_search" }], + model: "gpt-4-turbo" + } + ); + + console.log(myUpdatedAssistant); + } + + main(); + response: | + { + "id": "asst_123", + "object": "assistant", + "created_at": 1699009709, + "name": "HR Helper", + "description": null, + "model": "gpt-4-turbo", + "instructions": "You are an HR bot, and you have access to files to answer employee questions about company policies. Always response with info from either of the files.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": [] + } + }, + "metadata": {}, + "top_p": 1.0, + "temperature": 1.0, + "response_format": "auto" + } + delete: + operationId: deleteAssistant + tags: + - Assistants + summary: Delete an assistant. + parameters: + - in: path + name: assistant_id + required: true + schema: + type: string + description: The ID of the assistant to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteAssistantResponse" + x-oaiMeta: + name: Delete assistant + group: assistants + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/assistants/asst_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() + + response = client.beta.assistants.delete("asst_abc123") + print(response) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.beta.assistants.del("asst_abc123"); + + console.log(response); + } + main(); + response: | + { + "id": "asst_abc123", + "object": "assistant.deleted", + "deleted": true + } + + /threads: + post: + operationId: createThread + tags: + - Assistants + summary: Create a thread. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/CreateThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Create thread + group: threads + beta: true + returns: A [thread](/docs/api-reference/threads) object. + examples: + - title: Empty + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '' + python: | + from openai import OpenAI + client = OpenAI() + + empty_thread = client.beta.threads.create() + print(empty_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const emptyThread = await openai.beta.threads.create(); + + console.log(emptyThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699012949, + "metadata": {}, + "tool_resources": {} + } + - title: Messages + request: + curl: | + curl https://api.openai.com/v1/threads \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "messages": [{ + "role": "user", + "content": "Hello, what is AI?" + }, { + "role": "user", + "content": "How does AI work? Explain it in simple terms." 
+ }] + }' + python: | + from openai import OpenAI + client = OpenAI() + + message_thread = client.beta.threads.create( + messages=[ + { + "role": "user", + "content": "Hello, what is AI?" + }, + { + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }, + ] + ) + + print(message_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const messageThread = await openai.beta.threads.create({ + messages: [ + { + role: "user", + content: "Hello, what is AI?" + }, + { + role: "user", + content: "How does AI work? Explain it in simple terms.", + }, + ], + }); + + console.log(messageThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {}, + "tool_resources": {} + } + + /threads/{thread_id}: + get: + operationId: getThread + tags: + - Assistants + summary: Retrieves a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Retrieve thread + group: threads + beta: true + returns: The [thread](/docs/api-reference/threads/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + my_thread = client.beta.threads.retrieve("thread_abc123") + print(my_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const myThread = await openai.beta.threads.retrieve( + "thread_abc123" + ); + + console.log(myThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": {}, + "tool_resources": { + "code_interpreter": { + "file_ids": [] + } + } + } + post: + operationId: modifyThread + tags: + - Assistants + summary: Modifies a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to modify. Only the `metadata` can be modified. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyThreadRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ThreadObject" + x-oaiMeta: + name: Modify thread + group: threads + beta: true + returns: The modified [thread](/docs/api-reference/threads/object) object matching the specified ID. 
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "modified": "true", + "user": "abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + my_updated_thread = client.beta.threads.update( + "thread_abc123", + metadata={ + "modified": "true", + "user": "abc123" + } + ) + print(my_updated_thread) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const updatedThread = await openai.beta.threads.update( + "thread_abc123", + { + metadata: { modified: "true", user: "abc123" }, + } + ); + + console.log(updatedThread); + } + + main(); + response: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1699014083, + "metadata": { + "modified": "true", + "user": "abc123" + }, + "tool_resources": {} + } + delete: + operationId: deleteThread + tags: + - Assistants + summary: Delete a thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to delete. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/DeleteThreadResponse" + x-oaiMeta: + name: Delete thread + group: threads + beta: true + returns: Deletion status + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123 \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X DELETE + python: | + from openai import OpenAI + client = OpenAI() + + response = client.beta.threads.delete("thread_abc123") + print(response) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const response = await openai.beta.threads.del("thread_abc123"); + + console.log(response); + } + main(); + response: | + { + "id": "thread_abc123", + "object": "thread.deleted", + "deleted": true + } + + /threads/{thread_id}/messages: + get: + operationId: listMessages + tags: + - Assistants + summary: Returns a list of messages for a given thread. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) the messages belong to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + - name: run_id + in: query + description: | + Filter messages by the run ID that generated them. + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListMessagesResponse" + x-oaiMeta: + name: List messages + group: threads + beta: true + returns: A list of [message](/docs/api-reference/messages) objects. 
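Message content in the responses above is a list of typed parts rather than a plain string. A sketch that flattens a thread's messages into role/text pairs, handling only `text` parts; the thread ID is a placeholder:

from openai import OpenAI

client = OpenAI()

messages = client.beta.threads.messages.list("thread_abc123")  # placeholder thread ID
for message in messages.data:
    text = " ".join(
        part.text.value for part in message.content if part.type == "text"
    )
    print(f"{message.role}: {text}")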
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + thread_messages = client.beta.threads.messages.list("thread_abc123") + print(thread_messages.data) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.list( + "thread_abc123" + ); + + console.log(threadMessages.data); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1699016383, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + }, + { + "id": "msg_abc456", + "object": "thread.message", + "created_at": 1699016383, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "Hello, what is AI?", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + ], + "first_id": "msg_abc123", + "last_id": "msg_abc456", + "has_more": false + } + post: + operationId: createMessage + tags: + - Assistants + summary: Create a message. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to create a message for. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateMessageRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/MessageObject" + x-oaiMeta: + name: Create message + group: threads + beta: true + returns: A [message](/docs/api-reference/messages/object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/messages \ + -H "Content-Type: application/json" \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "role": "user", + "content": "How does AI work? Explain it in simple terms." + }' + python: | + from openai import OpenAI + client = OpenAI() + + thread_message = client.beta.threads.messages.create( + "thread_abc123", + role="user", + content="How does AI work? Explain it in simple terms.", + ) + print(thread_message) + node.js: |- + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const threadMessages = await openai.beta.threads.messages.create( + "thread_abc123", + { role: "user", content: "How does AI work? Explain it in simple terms." } + ); + + console.log(threadMessages); + } + + main(); + response: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1713226573, + "assistant_id": null, + "thread_id": "thread_abc123", + "run_id": null, + "role": "user", + "content": [ + { + "type": "text", + "text": { + "value": "How does AI work? Explain it in simple terms.", + "annotations": [] + } + } + ], + "attachments": [], + "metadata": {} + } + + /threads/{thread_id}/messages/{message_id}: + get: + operationId: getMessage + tags: + - Assistants + summary: Retrieve a message. 
+ parameters:
+ - in: path
+ name: thread_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the [thread](/docs/api-reference/threads) to which this message belongs.
+ - in: path
+ name: message_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the message to retrieve.
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/MessageObject"
+ x-oaiMeta:
+ name: Retrieve message
+ group: threads
+ beta: true
+ returns: The [message](/docs/api-reference/threads/messages/object) object matching the specified ID.
+ examples:
+ request:
+ curl: |
+ curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2"
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ message = client.beta.threads.messages.retrieve(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ )
+ print(message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const message = await openai.beta.threads.messages.retrieve(
+ "thread_abc123",
+ "msg_abc123"
+ );
+
+ console.log(message);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message",
+ "created_at": 1699017614,
+ "assistant_id": null,
+ "thread_id": "thread_abc123",
+ "run_id": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": {
+ "value": "How does AI work? Explain it in simple terms.",
+ "annotations": []
+ }
+ }
+ ],
+ "attachments": [],
+ "metadata": {}
+ }
+ post:
+ operationId: modifyMessage
+ tags:
+ - Assistants
+ summary: Modifies a message.
+ parameters:
+ - in: path
+ name: thread_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread to which this message belongs.
+ - in: path
+ name: message_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the message to modify.
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/ModifyMessageRequest"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/MessageObject"
+ x-oaiMeta:
+ name: Modify message
+ group: threads
+ beta: true
+ returns: The modified [message](/docs/api-reference/threads/messages/object) object.
+ examples:
+ request:
+ curl: |
+ curl https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -d '{
+ "metadata": {
+ "modified": "true",
+ "user": "abc123"
+ }
+ }'
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ message = client.beta.threads.messages.update(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ metadata={
+ "modified": "true",
+ "user": "abc123",
+ },
+ )
+ print(message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const message = await openai.beta.threads.messages.update(
+ "thread_abc123",
+ "msg_abc123",
+ {
+ metadata: {
+ modified: "true",
+ user: "abc123",
+ },
+ }
+ );
+
+ console.log(message);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message",
+ "created_at": 1699017614,
+ "assistant_id": null,
+ "thread_id": "thread_abc123",
+ "run_id": null,
+ "role": "user",
+ "content": [
+ {
+ "type": "text",
+ "text": {
+ "value": "How does AI work? Explain it in simple terms.",
+ "annotations": []
+ }
+ }
+ ],
+ "attachments": [],
+ "metadata": {
+ "modified": "true",
+ "user": "abc123"
+ }
+ }
+ delete:
+ operationId: deleteMessage
+ tags:
+ - Assistants
+ summary: Deletes a message.
+ parameters:
+ - in: path
+ name: thread_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread to which this message belongs.
+ - in: path
+ name: message_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the message to delete.
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/DeleteMessageResponse"
+ x-oaiMeta:
+ name: Delete message
+ group: threads
+ beta: true
+ returns: Deletion status
+ examples:
+ request:
+ curl: |
+ curl -X DELETE https://api.openai.com/v1/threads/thread_abc123/messages/msg_abc123 \
+ -H "Content-Type: application/json" \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "OpenAI-Beta: assistants=v2"
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ deleted_message = client.beta.threads.messages.delete(
+ message_id="msg_abc123",
+ thread_id="thread_abc123",
+ )
+ print(deleted_message)
+ node.js: |-
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const deletedMessage = await openai.beta.threads.messages.del(
+ "thread_abc123",
+ "msg_abc123"
+ );
+
+ console.log(deletedMessage);
+ }
+
+ main();
+ response: |
+ {
+ "id": "msg_abc123",
+ "object": "thread.message.deleted",
+ "deleted": true
+ }
+
+ /threads/runs:
+ post:
+ operationId: createThreadAndRun
+ tags:
+ - Assistants
+ summary: Create a thread and run it in one request.
+ requestBody:
+ required: true
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/CreateThreadAndRunRequest"
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/RunObject"
+ x-oaiMeta:
+ name: Create thread and run
+ group: threads
+ beta: true
+ returns: A [run](/docs/api-reference/runs/object) object.
+ examples:
+ - title: Default
+ request:
+ curl: |
+ curl https://api.openai.com/v1/threads/runs \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -d '{
+ "assistant_id": "asst_abc123",
+ "thread": {
+ "messages": [
+ {"role": "user", "content": "Explain deep learning to a 5 year old."}
+ ]
+ }
+ }'
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ run = client.beta.threads.create_and_run(
+ assistant_id="asst_abc123",
+ thread={
+ "messages": [
+ {"role": "user", "content": "Explain deep learning to a 5 year old."}
+ ]
+ }
+ )
+
+ print(run)
+ node.js: |
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const run = await openai.beta.threads.createAndRun({
+ assistant_id: "asst_abc123",
+ thread: {
+ messages: [
+ { role: "user", content: "Explain deep learning to a 5 year old."
}, + ], + }, + }); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076792, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": null, + "expires_at": 1699077392, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "required_action": null, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": "You are a helpful assistant.", + "tools": [], + "tool_resources": {}, + "metadata": {}, + "temperature": 1.0, + "top_p": 1.0, + "max_completion_tokens": null, + "max_prompt_tokens": null, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "incomplete_details": null, + "usage": null, + "response_format": "auto", + "tool_choice": "auto" + } + + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_123", + "thread": { + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.create_and_run( + assistant_id="asst_123", + thread={ + "messages": [ + {"role": "user", "content": "Hello"} + ] + }, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "Hello" }, + ], + }, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: | + event: thread.created + data: {"id":"thread_123","object":"thread","created_at":1710348075,"metadata":{}} + + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + + event: thread.run.in_progress + data: 
{"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"tool_resources":{},"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"} + + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[], "metadata":{}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + ... + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + event: thread.message.completed + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! 
How can I assist you today?","annotations":[]}}], "metadata":{}} + + event: thread.run.step.completed + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: thread.run.completed + {"id":"run_123","object":"thread.run","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1713226836,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1713226837,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"} + + event: done + data: [DONE] + + - title: Streaming with Functions + request: + curl: | + curl https://api.openai.com/v1/threads/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123", + "thread": { + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + stream = client.beta.threads.create_and_run( + thread={ + "messages": [ + {"role": "user", "content": "What is the weather like in San Francisco?"} + ] + }, + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + async function main() { + const stream = await openai.beta.threads.createAndRun({ + assistant_id: "asst_123", + thread: { + messages: [ + { role: "user", content: "What is the weather like in San Francisco?" }, + ], + }, + tools: tools, + stream: true + }); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: | + event: thread.created + data: {"id":"thread_123","object":"thread","created_at":1710351818,"metadata":{}} + + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710351819,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710352418,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[]},"usage":null} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"","output":null}}]}}} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"{\""}}]}}} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"location"}}]}}} + + ... + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"ahrenheit"}}]}}} + + event: thread.run.step.delta + data: {"id":"step_001","object":"thread.run.step.delta","delta":{"step_details":{"type":"tool_calls","tool_calls":[{"index":0,"type":"function","function":{"arguments":"\"}"}}]}}} + + event: thread.run.requires_action + data: {"id":"run_123","object":"thread.run","created_at":1710351818,"assistant_id":"asst_123","thread_id":"thread_123","status":"requires_action","started_at":1710351818,"expires_at":1710352418,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":{"type":"submit_tool_outputs","submit_tool_outputs":{"tool_calls":[{"id":"call_XXNp8YGaFrjrSjgqxtC8JJ1B","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}"}}]}},"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":345,"completion_tokens":11,"total_tokens":356},"response_format":"auto","tool_choice":"auto"}} + + event: done + data: [DONE] + + /threads/{thread_id}/runs: + get: + operationId: listRuns + tags: + - Assistants + summary: Returns a list of runs belonging to a thread. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run belongs to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunsResponse" + x-oaiMeta: + name: List runs + group: threads + beta: true + returns: A list of [run](/docs/api-reference/runs/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + runs = client.beta.threads.runs.list( + "thread_abc123" + ) + + print(runs) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const runs = await openai.beta.threads.runs.list( + "thread_abc123" + ); + + console.log(runs); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + }, + { + "id": "run_abc456", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + 
} + }, + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + } + ], + "first_id": "run_abc123", + "last_id": "run_abc456", + "has_more": false + } + post: + operationId: createRun + tags: + - Assistants + summary: Create a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to run. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Create run + group: threads + beta: true + returns: A [run](/docs/api-reference/runs/object) object. + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.create( + "thread_abc123", + { assistant_id: "asst_abc123" } + ); + + console.log(run); + } + + main(); + response: &run_object_example | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699063290, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "queued", + "started_at": 1699063290, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699063291, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + } + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "assistant_id": "asst_123", + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.create( + thread_id="thread_123", + assistant_id="asst_123", + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_123", + { assistant_id: "asst_123", stream: true } + ); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: | + event: thread.run.created + data: 
{"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710330641,"expires_at":1710331240,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + ... 
+
+ event: thread.message.delta
+ data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}}
+
+ event: thread.message.delta
+ data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}}
+
+ event: thread.message.completed
+ data: {"id":"msg_001","object":"thread.message","created_at":1710330641,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710330642,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}],"metadata":{}}
+
+ event: thread.run.step.completed
+ data: {"id":"step_001","object":"thread.run.step","created_at":1710330641,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710330642,"expires_at":1710331240,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}}
+
+ event: thread.run.completed
+ data: {"id":"run_123","object":"thread.run","created_at":1710330640,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710330641,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710330642,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}
+
+ event: done
+ data: [DONE]
+
+ - title: Streaming with Functions
+ request:
+ curl: |
+ curl https://api.openai.com/v1/threads/thread_abc123/runs \
+ -H "Authorization: Bearer $OPENAI_API_KEY" \
+ -H "Content-Type: application/json" \
+ -H "OpenAI-Beta: assistants=v2" \
+ -d '{
+ "assistant_id": "asst_abc123",
+ "tools": [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. San Francisco, CA"
+ },
+ "unit": {
+ "type": "string",
+ "enum": ["celsius", "fahrenheit"]
+ }
+ },
+ "required": ["location"]
+ }
+ }
+ }
+ ],
+ "stream": true
+ }'
+ python: |
+ from openai import OpenAI
+ client = OpenAI()
+
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_weather",
+ "description": "Get the current weather in a given location",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "location": {
+ "type": "string",
+ "description": "The city and state, e.g. 
San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ] + + stream = client.beta.threads.runs.create( + thread_id="thread_abc123", + assistant_id="asst_abc123", + tools=tools, + stream=True + ) + + for event in stream: + print(event) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + const tools = [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA", + }, + "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, + }, + "required": ["location"], + }, + } + } + ]; + + async function main() { + const stream = await openai.beta.threads.runs.create( + "thread_abc123", + { + assistant_id: "asst_abc123", + tools: tools, + stream: true + } + ); + + for await (const event of stream) { + console.log(event); + } + } + + main(); + response: | + event: thread.run.created + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.queued + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":null,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.in_progress + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710348075,"expires_at":1710348675,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.step.created + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.run.step.in_progress + data: 
{"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"Hello","annotations":[]}}]}} + + ... + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" today"}}]}} + + event: thread.message.delta + data: {"id":"msg_001","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"?"}}]}} + + event: thread.message.completed + data: {"id":"msg_001","object":"thread.message","created_at":1710348076,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710348077,"role":"assistant","content":[{"type":"text","text":{"value":"Hello! How can I assist you today?","annotations":[]}}],"metadata":{}} + + event: thread.run.step.completed + data: {"id":"step_001","object":"thread.run.step","created_at":1710348076,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710348077,"expires_at":1710348675,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_001"}},"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31}} + + event: thread.run.completed + data: {"id":"run_123","object":"thread.run","created_at":1710348075,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710348075,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710348077,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}} + + event: done + data: [DONE] + + /threads/{thread_id}/runs/{run_id}: + get: + operationId: getRun + tags: + - Assistants + summary: Retrieves a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to retrieve. 
+ responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Retrieve run + group: threads + beta: true + returns: The [run](/docs/api-reference/runs/object) object matching the specified ID. + examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.retrieve( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.retrieve( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "metadata": {}, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + } + post: + operationId: modifyRun + tags: + - Assistants + summary: Modifies a run. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) that was run. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to modify. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/ModifyRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Modify run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. 
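+ # Per ModifyRunRequest, `metadata` is the only field that can be changed on
+ # an existing run. A short sketch mirroring the example below (placeholder
+ # IDs; assumes the SDK's `update` method shown there):
+ #
+ #   run = client.beta.threads.runs.update(
+ #       thread_id="thread_abc123",
+ #       run_id="run_abc123",
+ #       # Attach free-form key/value pairs to the run.
+ #       metadata={"user_id": "user_abc123"},
+ #   )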
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "metadata": { + "user_id": "user_abc123" + } + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.update( + thread_id="thread_abc123", + run_id="run_abc123", + metadata={"user_id": "user_abc123"}, + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.update( + "thread_abc123", + "run_abc123", + { + metadata: { + user_id: "user_abc123", + }, + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699075072, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699075072, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699075073, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "incomplete_details": null, + "tools": [ + { + "type": "code_interpreter" + } + ], + "tool_resources": { + "code_interpreter": { + "file_ids": [ + "file-abc123", + "file-abc456" + ] + } + }, + "metadata": { + "user_id": "user_abc123" + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + } + + /threads/{thread_id}/runs/{run_id}/submit_tool_outputs: + post: + operationId: submitToolOuputsToRun + tags: + - Assistants + summary: | + When a run has the `status: "requires_action"` and `required_action.type` is `submit_tool_outputs`, this endpoint can be used to submit the outputs from the tool calls once they're all completed. All outputs must be submitted in a single request. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the [thread](/docs/api-reference/threads) to which this run belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run that requires the tool output submission. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/SubmitToolOutputsRunRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunObject" + x-oaiMeta: + name: Submit tool outputs to run + group: threads + beta: true + returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID. + examples: + - title: Default + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ] + }' + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
+ } + ] + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.submitToolOutputs( + "thread_123", + "run_123", + { + tool_outputs: [ + { + tool_call_id: "call_001", + output: "70 degrees and sunny.", + }, + ], + } + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_123", + "object": "thread.run", + "created_at": 1699075592, + "assistant_id": "asst_123", + "thread_id": "thread_123", + "status": "queued", + "started_at": 1699075592, + "expires_at": 1699076192, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "tools": [ + { + "type": "function", + "function": { + "name": "get_current_weather", + "description": "Get the current weather in a given location", + "parameters": { + "type": "object", + "properties": { + "location": { + "type": "string", + "description": "The city and state, e.g. San Francisco, CA" + }, + "unit": { + "type": "string", + "enum": ["celsius", "fahrenheit"] + } + }, + "required": ["location"] + } + } + } + ], + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + } + + - title: Streaming + request: + curl: | + curl https://api.openai.com/v1/threads/thread_123/runs/run_123/submit_tool_outputs \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "tool_outputs": [ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." + } + ], + "stream": true + }' + python: | + from openai import OpenAI + client = OpenAI() + + stream = client.beta.threads.runs.submit_tool_outputs( + thread_id="thread_123", + run_id="run_123", + tool_outputs=[ + { + "tool_call_id": "call_001", + "output": "70 degrees and sunny." 
+ }
+ ],
+ stream=True
+ )
+
+ for event in stream:
+ print(event)
+ node.js: |
+ import OpenAI from "openai";
+
+ const openai = new OpenAI();
+
+ async function main() {
+ const stream = await openai.beta.threads.runs.submitToolOutputs(
+ "thread_123",
+ "run_123",
+ {
+ tool_outputs: [
+ {
+ tool_call_id: "call_001",
+ output: "70 degrees and sunny.",
+ },
+ ],
+ stream: true,
+ }
+ );
+
+ for await (const event of stream) {
+ console.log(event);
+ }
+ }
+
+ main();
+ response: |
+ event: thread.run.step.completed
+ data: {"id":"step_001","object":"thread.run.step","created_at":1710352449,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"tool_calls","status":"completed","cancelled_at":null,"completed_at":1710352475,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"tool_calls","tool_calls":[{"id":"call_iWr0kQ2EaYMaxNdl0v3KYkx7","type":"function","function":{"name":"get_current_weather","arguments":"{\"location\":\"San Francisco, CA\",\"unit\":\"fahrenheit\"}","output":"70 degrees and sunny."}}]},"usage":{"prompt_tokens":291,"completion_tokens":24,"total_tokens":315}}
+
+ event: thread.run.queued
+ data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"queued","started_at":1710352448,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}
+
+ event: thread.run.in_progress
+ data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"in_progress","started_at":1710352475,"expires_at":1710353047,"cancelled_at":null,"failed_at":null,"completed_at":null,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. 
San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":null,"response_format":"auto","tool_choice":"auto"}} + + event: thread.run.step.created + data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + event: thread.run.step.in_progress + data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"in_progress","cancelled_at":null,"completed_at":null,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":null} + + event: thread.message.created + data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + event: thread.message.in_progress + data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"in_progress","incomplete_details":null,"incomplete_at":null,"completed_at":null,"role":"assistant","content":[],"metadata":{}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"The","annotations":[]}}]}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" current"}}]}} + + event: thread.message.delta + data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" weather"}}]}} + + ... 
+
+ event: thread.message.delta
+ data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":" sunny"}}]}}
+
+ event: thread.message.delta
+ data: {"id":"msg_002","object":"thread.message.delta","delta":{"content":[{"index":0,"type":"text","text":{"value":"."}}]}}
+
+ event: thread.message.completed
+ data: {"id":"msg_002","object":"thread.message","created_at":1710352476,"assistant_id":"asst_123","thread_id":"thread_123","run_id":"run_123","status":"completed","incomplete_details":null,"incomplete_at":null,"completed_at":1710352477,"role":"assistant","content":[{"type":"text","text":{"value":"The current weather in San Francisco, CA is 70 degrees Fahrenheit and sunny.","annotations":[]}}],"metadata":{}}
+
+ event: thread.run.step.completed
+ data: {"id":"step_002","object":"thread.run.step","created_at":1710352476,"run_id":"run_123","assistant_id":"asst_123","thread_id":"thread_123","type":"message_creation","status":"completed","cancelled_at":null,"completed_at":1710352477,"expires_at":1710353047,"failed_at":null,"last_error":null,"step_details":{"type":"message_creation","message_creation":{"message_id":"msg_002"}},"usage":{"prompt_tokens":329,"completion_tokens":18,"total_tokens":347}}
+
+ event: thread.run.completed
+ data: {"id":"run_123","object":"thread.run","created_at":1710352447,"assistant_id":"asst_123","thread_id":"thread_123","status":"completed","started_at":1710352475,"expires_at":null,"cancelled_at":null,"failed_at":null,"completed_at":1710352477,"required_action":null,"last_error":null,"model":"gpt-4-turbo","instructions":null,"tools":[{"type":"function","function":{"name":"get_current_weather","description":"Get the current weather in a given location","parameters":{"type":"object","properties":{"location":{"type":"string","description":"The city and state, e.g. San Francisco, CA"},"unit":{"type":"string","enum":["celsius","fahrenheit"]}},"required":["location"]}}}],"metadata":{},"temperature":1.0,"top_p":1.0,"max_completion_tokens":null,"max_prompt_tokens":null,"truncation_strategy":{"type":"auto","last_messages":null},"incomplete_details":null,"usage":{"prompt_tokens":20,"completion_tokens":11,"total_tokens":31},"response_format":"auto","tool_choice":"auto"}
+
+ event: done
+ data: [DONE]
+
+ /threads/{thread_id}/runs/{run_id}/cancel:
+ post:
+ operationId: cancelRun
+ tags:
+ - Assistants
+ summary: Cancels a run that is `in_progress`.
+ parameters:
+ - in: path
+ name: thread_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the thread to which this run belongs.
+ - in: path
+ name: run_id
+ required: true
+ schema:
+ type: string
+ description: The ID of the run to cancel.
+ responses:
+ "200":
+ description: OK
+ content:
+ application/json:
+ schema:
+ $ref: "#/components/schemas/RunObject"
+ x-oaiMeta:
+ name: Cancel a run
+ group: threads
+ beta: true
+ returns: The modified [run](/docs/api-reference/runs/object) object matching the specified ID.
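+ # Cancellation is asynchronous: the example response below reports
+ # `"status": "cancelling"`, and the run later settles into `cancelled`
+ # (unless it finishes first). A hedged wait-for-cancellation sketch
+ # (placeholder IDs; assumes the SDK methods shown in this spec):
+ #
+ #   import time
+ #
+ #   run = client.beta.threads.runs.cancel(
+ #       thread_id="thread_abc123", run_id="run_abc123"
+ #   )
+ #   # Poll until the cancellation takes effect.
+ #   while run.status == "cancelling":
+ #       time.sleep(1)
+ #       run = client.beta.threads.runs.retrieve(
+ #           thread_id="thread_abc123", run_id="run_abc123"
+ #       )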
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/cancel \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "OpenAI-Beta: assistants=v2" \ + -X POST + python: | + from openai import OpenAI + client = OpenAI() + + run = client.beta.threads.runs.cancel( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run) + node.js: | + import OpenAI from "openai"; + + const openai = new OpenAI(); + + async function main() { + const run = await openai.beta.threads.runs.cancel( + "thread_abc123", + "run_abc123" + ); + + console.log(run); + } + + main(); + response: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1699076126, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "cancelling", + "started_at": 1699076126, + "expires_at": 1699076726, + "cancelled_at": null, + "failed_at": null, + "completed_at": null, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": "You summarize books.", + "tools": [ + { + "type": "file_search" + } + ], + "tool_resources": { + "file_search": { + "vector_store_ids": ["vs_123"] + } + }, + "metadata": {}, + "usage": null, + "temperature": 1.0, + "top_p": 1.0, + "response_format": "auto" + } + + /threads/{thread_id}/runs/{run_id}/steps: + get: + operationId: listRunSteps + tags: + - Assistants + summary: Returns a list of run steps belonging to a run. + parameters: + - name: thread_id + in: path + required: true + schema: + type: string + description: The ID of the thread the run and run steps belong to. + - name: run_id + in: path + required: true + schema: + type: string + description: The ID of the run the run steps belong to. + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListRunStepsResponse" + x-oaiMeta: + name: List run steps + group: threads + beta: true + returns: A list of [run step](/docs/api-reference/runs/step-object) objects. 
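+        # Run steps are paginated like the other list endpoints. A sketch of walking
+        # every page with the `after` cursor (assuming the `openai` Python client
+        # shown in the examples below, whose list pages expose `data`, `has_more`,
+        # and `last_id` as in the response example):
+        #
+        #   from openai import OpenAI
+        #   client = OpenAI()
+        #   after = None
+        #   while True:
+        #       page = client.beta.threads.runs.steps.list(
+        #           thread_id="thread_abc123", run_id="run_abc123", limit=20, after=after)
+        #       for step in page.data:
+        #           print(step.id, step.type, step.status)
+        #       if not page.has_more:
+        #           break
+        #       after = page.last_id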
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run_steps = client.beta.threads.runs.steps.list( + thread_id="thread_abc123", + run_id="run_abc123" + ) + + print(run_steps) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.list( + "thread_abc123", + "run_abc123" + ); + console.log(runStep); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + ], + "first_id": "step_abc123", + "last_id": "step_abc456", + "has_more": false + } + + /threads/{thread_id}/runs/{run_id}/steps/{step_id}: + get: + operationId: getRunStep + tags: + - Assistants + summary: Retrieves a run step. + parameters: + - in: path + name: thread_id + required: true + schema: + type: string + description: The ID of the thread to which the run and run step belongs. + - in: path + name: run_id + required: true + schema: + type: string + description: The ID of the run to which the run step belongs. + - in: path + name: step_id + required: true + schema: + type: string + description: The ID of the run step to retrieve. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/RunStepObject" + x-oaiMeta: + name: Retrieve run step + group: threads + beta: true + returns: The [run step](/docs/api-reference/runs/step-object) object matching the specified ID. 
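+        # `step_details` is a union discriminated by `type`: `message_creation` in the
+        # example below, while runs that invoke tools produce `tool_calls` steps. A
+        # sketch of branching on it (assuming the `openai` Python client shown in the
+        # examples below):
+        #
+        #   step = client.beta.threads.runs.steps.retrieve(
+        #       thread_id="thread_abc123", run_id="run_abc123", step_id="step_abc123")
+        #   if step.step_details.type == "message_creation":
+        #       print(step.step_details.message_creation.message_id)
+        #   elif step.step_details.type == "tool_calls":
+        #       print(step.step_details.tool_calls)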
+ examples: + request: + curl: | + curl https://api.openai.com/v1/threads/thread_abc123/runs/run_abc123/steps/step_abc123 \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + run_step = client.beta.threads.runs.steps.retrieve( + thread_id="thread_abc123", + run_id="run_abc123", + step_id="step_abc123" + ) + + print(run_step) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const runStep = await openai.beta.threads.runs.steps.retrieve( + "thread_abc123", + "run_abc123", + "step_abc123" + ); + console.log(runStep); + } + + main(); + response: &run_step_object_example | + { + "id": "step_abc123", + "object": "thread.run.step", + "created_at": 1699063291, + "run_id": "run_abc123", + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "type": "message_creation", + "status": "completed", + "cancelled_at": null, + "completed_at": 1699063291, + "expired_at": null, + "failed_at": null, + "last_error": null, + "step_details": { + "type": "message_creation", + "message_creation": { + "message_id": "msg_abc123" + } + }, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + } + } + + /vector_stores: + get: + operationId: listVectorStores + tags: + - Vector Stores + summary: Returns a list of vector stores. + parameters: + - name: limit + in: query + description: *pagination_limit_param_description + required: false + schema: + type: integer + default: 20 + - name: order + in: query + description: *pagination_order_param_description + schema: + type: string + default: desc + enum: ["asc", "desc"] + - name: after + in: query + description: *pagination_after_param_description + schema: + type: string + - name: before + in: query + description: *pagination_before_param_description + schema: + type: string + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/ListVectorStoresResponse" + x-oaiMeta: + name: List vector stores + group: vector_stores + beta: true + returns: A list of [vector store](/docs/api-reference/vector-stores/object) objects. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_stores = client.beta.vector_stores.list() + print(vector_stores) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStores = await openai.beta.vectorStores.list(); + console.log(vectorStores); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "vs_abc123", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + }, + { + "id": "vs_abc456", + "object": "vector_store", + "created_at": 1699061776, + "name": "Support FAQ v2", + "bytes": 139920, + "file_counts": { + "in_progress": 0, + "completed": 3, + "failed": 0, + "cancelled": 0, + "total": 3 + } + } + ], + "first_id": "vs_abc123", + "last_id": "vs_abc456", + "has_more": false + } + post: + operationId: createVectorStore + tags: + - Vector Stores + summary: Create a vector store. 
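+      # Files can be attached at creation time as well: CreateVectorStoreRequest
+      # (referenced below) is expected to accept an optional `file_ids` array in
+      # addition to `name`. An illustrative sketch with the Python client from the
+      # examples (assumes the request schema exposes `file_ids`):
+      #
+      #   vector_store = client.beta.vector_stores.create(
+      #       name="Support FAQ",
+      #       file_ids=["file-abc123"],
+      #   )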
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateVectorStoreRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreObject"
+      x-oaiMeta:
+        name: Create vector store
+        group: vector_stores
+        beta: true
+        returns: A [vector store](/docs/api-reference/vector-stores/object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -d '{
+                  "name": "Support FAQ"
+                }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store = client.beta.vector_stores.create(
+                name="Support FAQ"
+              )
+              print(vector_store)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStore = await openai.beta.vectorStores.create({
+                  name: "Support FAQ"
+                });
+                console.log(vectorStore);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vs_abc123",
+              "object": "vector_store",
+              "created_at": 1699061776,
+              "name": "Support FAQ",
+              "bytes": 139920,
+              "file_counts": {
+                "in_progress": 0,
+                "completed": 3,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 3
+              }
+            }
+
+  /vector_stores/{vector_store_id}:
+    get:
+      operationId: getVectorStore
+      tags:
+        - Vector Stores
+      summary: Retrieves a vector store.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store to retrieve.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreObject"
+      x-oaiMeta:
+        name: Retrieve vector store
+        group: vector_stores
+        beta: true
+        returns: The [vector store](/docs/api-reference/vector-stores/object) object matching the specified ID.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store = client.beta.vector_stores.retrieve(
+                vector_store_id="vs_abc123"
+              )
+              print(vector_store)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStore = await openai.beta.vectorStores.retrieve(
+                  "vs_abc123"
+                );
+                console.log(vectorStore);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vs_abc123",
+              "object": "vector_store",
+              "created_at": 1699061776
+            }
+    post:
+      operationId: modifyVectorStore
+      tags:
+        - Vector Stores
+      summary: Modifies a vector store.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store to modify.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/UpdateVectorStoreRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreObject"
+      x-oaiMeta:
+        name: Modify vector store
+        group: vector_stores
+        beta: true
+        returns: The modified [vector store](/docs/api-reference/vector-stores/object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -d '{
+                  "name": "Support FAQ"
+                }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store = client.beta.vector_stores.update(
+                vector_store_id="vs_abc123",
+                name="Support FAQ"
+              )
+              print(vector_store)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStore = await openai.beta.vectorStores.update(
+                  "vs_abc123",
+                  {
+                    name: "Support FAQ"
+                  }
+                );
+                console.log(vectorStore);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vs_abc123",
+              "object": "vector_store",
+              "created_at": 1699061776,
+              "name": "Support FAQ",
+              "bytes": 139920,
+              "file_counts": {
+                "in_progress": 0,
+                "completed": 3,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 3
+              }
+            }
+
+    delete:
+      operationId: deleteVectorStore
+      tags:
+        - Vector Stores
+      summary: Delete a vector store.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store to delete.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/DeleteVectorStoreResponse"
+      x-oaiMeta:
+        name: Delete vector store
+        group: vector_stores
+        beta: true
+        returns: Deletion status
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X DELETE
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_vector_store = client.beta.vector_stores.delete(
+                vector_store_id="vs_abc123"
+              )
+              print(deleted_vector_store)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedVectorStore = await openai.beta.vectorStores.del(
+                  "vs_abc123"
+                );
+                console.log(deletedVectorStore);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vs_abc123",
+              "object": "vector_store.deleted",
+              "deleted": true
+            }
+
+  /vector_stores/{vector_store_id}/files:
+    get:
+      operationId: listVectorStoreFiles
+      tags:
+        - Vector Stores
+      summary: Returns a list of vector store files.
+      parameters:
+        - name: vector_store_id
+          in: path
+          description: The ID of the vector store that the files belong to.
+          required: true
+          schema:
+            type: string
+        - name: limit
+          in: query
+          description: *pagination_limit_param_description
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: order
+          in: query
+          description: *pagination_order_param_description
+          schema:
+            type: string
+            default: desc
+            enum: ["asc", "desc"]
+        - name: after
+          in: query
+          description: *pagination_after_param_description
+          schema:
+            type: string
+        - name: before
+          in: query
+          description: *pagination_before_param_description
+          schema:
+            type: string
+        - name: filter
+          in: query
+          description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`."
+          schema:
+            type: string
+            enum: ["in_progress", "completed", "failed", "cancelled"]
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ListVectorStoreFilesResponse"
+      x-oaiMeta:
+        name: List vector store files
+        group: vector_stores
+        beta: true
+        returns: A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.
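+        # The `filter` query parameter defined above narrows results by file status.
+        # A sketch of listing only failed files (assuming the `openai` Python client
+        # shown in the examples below):
+        #
+        #   failed = client.beta.vector_stores.files.list(
+        #       vector_store_id="vs_abc123", filter="failed")
+        #   for f in failed.data:
+        #       print(f.id, f.last_error)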
+ examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_files = client.beta.vector_stores.files.list( + vector_store_id="vs_abc123" + ) + print(vector_store_files) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const vectorStoreFiles = await openai.beta.vectorStores.files.list( + "vs_abc123" + ); + console.log(vectorStoreFiles); + } + + main(); + response: | + { + "object": "list", + "data": [ + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + }, + { + "id": "file-abc456", + "object": "vector_store.file", + "created_at": 1699061776, + "vector_store_id": "vs_abc123" + } + ], + "first_id": "file-abc123", + "last_id": "file-abc456", + "has_more": false + } + post: + operationId: createVectorStoreFile + tags: + - Vector Stores + summary: Create a vector store file by attaching a [File](/docs/api-reference/files) to a [vector store](/docs/api-reference/vector-stores/object). + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + example: vs_abc123 + description: | + The ID of the vector store for which to create a File. + requestBody: + required: true + content: + application/json: + schema: + $ref: "#/components/schemas/CreateVectorStoreFileRequest" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/VectorStoreFileObject" + x-oaiMeta: + name: Create vector store file + group: vector_stores + beta: true + returns: A [vector store file](/docs/api-reference/vector-stores-files/file-object) object. + examples: + request: + curl: | + curl https://api.openai.com/v1/vector_stores/vs_abc123/files \ + -H "Authorization: Bearer $OPENAI_API_KEY" \ + -H "Content-Type: application/json" \ + -H "OpenAI-Beta: assistants=v2" \ + -d '{ + "file_id": "file-abc123" + }' + python: | + from openai import OpenAI + client = OpenAI() + + vector_store_file = client.beta.vector_stores.files.create( + vector_store_id="vs_abc123", + file_id="file-abc123" + ) + print(vector_store_file) + node.js: | + import OpenAI from "openai"; + const openai = new OpenAI(); + + async function main() { + const myVectorStoreFile = await openai.beta.vectorStores.files.create( + "vs_abc123", + { + file_id: "file-abc123" + } + ); + console.log(myVectorStoreFile); + } + + main(); + response: | + { + "id": "file-abc123", + "object": "vector_store.file", + "created_at": 1699061776, + "usage_bytes": 1234, + "vector_store_id": "vs_abcd", + "status": "completed", + "last_error": null + } + + /vector_stores/{vector_store_id}/files/{file_id}: + get: + operationId: getVectorStoreFile + tags: + - Vector Stores + summary: Retrieves a vector store file. + parameters: + - in: path + name: vector_store_id + required: true + schema: + type: string + example: vs_abc123 + description: The ID of the vector store that the file belongs to. + - in: path + name: file_id + required: true + schema: + type: string + example: file-abc123 + description: The ID of the file being retrieved. 
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileObject"
+      x-oaiMeta:
+        name: Retrieve vector store file
+        group: vector_stores
+        beta: true
+        returns: The [vector store file](/docs/api-reference/vector-stores-files/file-object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store_file = client.beta.vector_stores.files.retrieve(
+                vector_store_id="vs_abc123",
+                file_id="file-abc123"
+              )
+              print(vector_store_file)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStoreFile = await openai.beta.vectorStores.files.retrieve(
+                  "vs_abc123",
+                  "file-abc123"
+                );
+                console.log(vectorStoreFile);
+              }
+
+              main();
+          response: |
+            {
+              "id": "file-abc123",
+              "object": "vector_store.file",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abcd",
+              "status": "completed",
+              "last_error": null
+            }
+    delete:
+      operationId: deleteVectorStoreFile
+      tags:
+        - Vector Stores
+      summary: Delete a vector store file. This will remove the file from the vector store but the file itself will not be deleted. To delete the file, use the [delete file](/docs/api-reference/files/delete) endpoint.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store that the file belongs to.
+        - in: path
+          name: file_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the file to delete.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/DeleteVectorStoreFileResponse"
+      x-oaiMeta:
+        name: Delete vector store file
+        group: vector_stores
+        beta: true
+        returns: Deletion status
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/files/file-abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X DELETE
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              deleted_vector_store_file = client.beta.vector_stores.files.delete(
+                vector_store_id="vs_abc123",
+                file_id="file-abc123"
+              )
+              print(deleted_vector_store_file)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const deletedVectorStoreFile = await openai.beta.vectorStores.files.del(
+                  "vs_abc123",
+                  "file-abc123"
+                );
+                console.log(deletedVectorStoreFile);
+              }
+
+              main();
+          response: |
+            {
+              "id": "file-abc123",
+              "object": "vector_store.file.deleted",
+              "deleted": true
+            }
+
+  /vector_stores/{vector_store_id}/file_batches:
+    post:
+      operationId: createVectorStoreFileBatch
+      tags:
+        - Vector Stores
+      summary: Create a vector store file batch.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+            example: vs_abc123
+          description: |
+            The ID of the vector store for which to create a File Batch.
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/CreateVectorStoreFileBatchRequest"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Create vector store file batch
+        group: vector_stores
+        beta: true
+        returns: A [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -d '{
+                  "file_ids": ["file-abc123", "file-abc456"]
+                }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store_file_batch = client.beta.vector_stores.file_batches.create(
+                vector_store_id="vs_abc123",
+                file_ids=["file-abc123", "file-abc456"]
+              )
+              print(vector_store_file_batch)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const myVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.create(
+                  "vs_abc123",
+                  {
+                    file_ids: ["file-abc123", "file-abc456"]
+                  }
+                );
+                console.log(myVectorStoreFileBatch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "in_progress",
+              "file_counts": {
+                "in_progress": 1,
+                "completed": 1,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 2
+              }
+            }
+
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}:
+    get:
+      operationId: getVectorStoreFileBatch
+      tags:
+        - Vector Stores
+      summary: Retrieves a vector store file batch.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+            example: vs_abc123
+          description: The ID of the vector store that the file batch belongs to.
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+            example: vsfb_abc123
+          description: The ID of the file batch being retrieved.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Retrieve vector store file batch
+        group: vector_stores
+        beta: true
+        returns: The [vector store file batch](/docs/api-reference/vector-stores-file-batches/batch-object) object.
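+        # Batches ingest asynchronously; retrieve can be used to poll `status` and
+        # `file_counts` until processing settles. A sketch (assuming the `openai`
+        # Python client shown in the examples below; the interval is arbitrary):
+        #
+        #   import time
+        #   batch = client.beta.vector_stores.file_batches.retrieve(
+        #       vector_store_id="vs_abc123", batch_id="vsfb_abc123")
+        #   while batch.status == "in_progress":
+        #       time.sleep(1)
+        #       batch = client.beta.vector_stores.file_batches.retrieve(
+        #           vector_store_id="vs_abc123", batch_id="vsfb_abc123")
+        #   print(batch.status, batch.file_counts)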
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store_file_batch = client.beta.vector_stores.file_batches.retrieve(
+                vector_store_id="vs_abc123",
+                batch_id="vsfb_abc123"
+              )
+              print(vector_store_file_batch)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.retrieve(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(vectorStoreFileBatch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "in_progress",
+              "file_counts": {
+                "in_progress": 1,
+                "completed": 1,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 2
+              }
+            }
+
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}/cancel:
+    post:
+      operationId: cancelVectorStoreFileBatch
+      tags:
+        - Vector Stores
+      summary: Cancel a vector store file batch. This attempts to cancel the processing of files in this batch as soon as possible.
+      parameters:
+        - in: path
+          name: vector_store_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the vector store that the file batch belongs to.
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the file batch to cancel.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/VectorStoreFileBatchObject"
+      x-oaiMeta:
+        name: Cancel vector store file batch
+        group: vector_stores
+        beta: true
+        returns: The modified vector store file batch object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/cancel \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2" \
+                -X POST
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              cancelled_vector_store_file_batch = client.beta.vector_stores.file_batches.cancel(
+                vector_store_id="vs_abc123",
+                batch_id="vsfb_abc123"
+              )
+              print(cancelled_vector_store_file_batch)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const cancelledVectorStoreFileBatch = await openai.beta.vectorStores.fileBatches.cancel(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(cancelledVectorStoreFileBatch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "vsfb_abc123",
+              "object": "vector_store.file_batch",
+              "created_at": 1699061776,
+              "vector_store_id": "vs_abc123",
+              "status": "cancelling",
+              "file_counts": {
+                "in_progress": 12,
+                "completed": 3,
+                "failed": 0,
+                "cancelled": 0,
+                "total": 15
+              }
+            }
+
+  /vector_stores/{vector_store_id}/file_batches/{batch_id}/files:
+    get:
+      operationId: listFilesInVectorStoreBatch
+      tags:
+        - Vector Stores
+      summary: Returns a list of vector store files in a batch.
+      parameters:
+        - name: vector_store_id
+          in: path
+          description: The ID of the vector store that the files belong to.
+          required: true
+          schema:
+            type: string
+        - name: batch_id
+          in: path
+          description: The ID of the file batch that the files belong to.
+          required: true
+          schema:
+            type: string
+        - name: limit
+          in: query
+          description: *pagination_limit_param_description
+          required: false
+          schema:
+            type: integer
+            default: 20
+        - name: order
+          in: query
+          description: *pagination_order_param_description
+          schema:
+            type: string
+            default: desc
+            enum: ["asc", "desc"]
+        - name: after
+          in: query
+          description: *pagination_after_param_description
+          schema:
+            type: string
+        - name: before
+          in: query
+          description: *pagination_before_param_description
+          schema:
+            type: string
+        - name: filter
+          in: query
+          description: "Filter by file status. One of `in_progress`, `completed`, `failed`, `cancelled`."
+          schema:
+            type: string
+            enum: ["in_progress", "completed", "failed", "cancelled"]
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ListVectorStoreFilesResponse"
+      x-oaiMeta:
+        name: List vector store files in a batch
+        group: vector_stores
+        beta: true
+        returns: A list of [vector store file](/docs/api-reference/vector-stores-files/file-object) objects.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/vector_stores/vs_abc123/file_batches/vsfb_abc123/files \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -H "OpenAI-Beta: assistants=v2"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              vector_store_files = client.beta.vector_stores.file_batches.list_files(
+                vector_store_id="vs_abc123",
+                batch_id="vsfb_abc123"
+              )
+              print(vector_store_files)
+            node.js: |
+              import OpenAI from "openai";
+              const openai = new OpenAI();
+
+              async function main() {
+                const vectorStoreFiles = await openai.beta.vectorStores.fileBatches.listFiles(
+                  "vs_abc123",
+                  "vsfb_abc123"
+                );
+                console.log(vectorStoreFiles);
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "file-abc123",
+                  "object": "vector_store.file",
+                  "created_at": 1699061776,
+                  "vector_store_id": "vs_abc123"
+                },
+                {
+                  "id": "file-abc456",
+                  "object": "vector_store.file",
+                  "created_at": 1699061776,
+                  "vector_store_id": "vs_abc123"
+                }
+              ],
+              "first_id": "file-abc123",
+              "last_id": "file-abc456",
+              "has_more": false
+            }
+
+  /batches:
+    post:
+      summary: Creates and executes a batch from an uploaded file of requests
+      operationId: createBatch
+      tags:
+        - Batch
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              type: object
+              required:
+                - input_file_id
+                - endpoint
+                - completion_window
+              properties:
+                input_file_id:
+                  type: string
+                  description: |
+                    The ID of an uploaded file that contains requests for the new batch.
+
+                    See [upload file](/docs/api-reference/files/create) for how to upload a file.
+
+                    Your input file must be formatted as a [JSONL file](/docs/api-reference/batch/requestInput), and must be uploaded with the purpose `batch`.
+                endpoint:
+                  type: string
+                  enum: ["/v1/chat/completions", "/v1/embeddings"]
+                  description: The endpoint to be used for all requests in the batch. Currently `/v1/chat/completions` and `/v1/embeddings` are supported.
+                completion_window:
+                  type: string
+                  enum: ["24h"]
+                  description: The time frame within which the batch should be processed. Currently only `24h` is supported.
+                metadata:
+                  type: object
+                  additionalProperties:
+                    type: string
+                  description: Optional custom metadata for the batch.
+                  nullable: true
+      responses:
+        "200":
+          description: Batch created successfully.
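+          # Each line of the input JSONL file is one self-contained request. A sketch
+          # of building and uploading such a file with the Python client (the
+          # `custom_id`/`method`/`url`/`body` line shape follows the Batch request
+          # input format referenced above; the file contents are illustrative):
+          #
+          #   import json
+          #   from openai import OpenAI
+          #   client = OpenAI()
+          #   with open("batch_input.jsonl", "w") as f:
+          #       f.write(json.dumps({
+          #           "custom_id": "request-1",
+          #           "method": "POST",
+          #           "url": "/v1/chat/completions",
+          #           "body": {"model": "gpt-3.5-turbo",
+          #                    "messages": [{"role": "user", "content": "Hello"}]},
+          #       }) + "\n")
+          #   batch_file = client.files.create(file=open("batch_input.jsonl", "rb"), purpose="batch")
+          #   # batch_file.id is then passed as input_file_id when creating the batch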
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Batch"
+      x-oaiMeta:
+        name: Create batch
+        group: batch
+        returns: The created [Batch](/docs/api-reference/batch/object) object.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/batches \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -d '{
+                  "input_file_id": "file-abc123",
+                  "endpoint": "/v1/chat/completions",
+                  "completion_window": "24h"
+                }'
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              client.batches.create(
+                input_file_id="file-abc123",
+                endpoint="/v1/chat/completions",
+                completion_window="24h"
+              )
+            node: |
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const batch = await openai.batches.create({
+                  input_file_id: "file-abc123",
+                  endpoint: "/v1/chat/completions",
+                  completion_window: "24h"
+                });
+
+                console.log(batch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "batch_abc123",
+              "object": "batch",
+              "endpoint": "/v1/chat/completions",
+              "errors": null,
+              "input_file_id": "file-abc123",
+              "completion_window": "24h",
+              "status": "validating",
+              "output_file_id": null,
+              "error_file_id": null,
+              "created_at": 1711471533,
+              "in_progress_at": null,
+              "expires_at": null,
+              "finalizing_at": null,
+              "completed_at": null,
+              "failed_at": null,
+              "expired_at": null,
+              "cancelling_at": null,
+              "cancelled_at": null,
+              "request_counts": {
+                "total": 0,
+                "completed": 0,
+                "failed": 0
+              },
+              "metadata": {
+                "customer_id": "user_123456789",
+                "batch_description": "Nightly eval job"
+              }
+            }
+    get:
+      operationId: listBatches
+      tags:
+        - Batch
+      summary: List your organization's batches.
+      parameters:
+        - in: query
+          name: after
+          required: false
+          schema:
+            type: string
+          description: *pagination_after_param_description
+        - name: limit
+          in: query
+          description: *pagination_limit_param_description
+          required: false
+          schema:
+            type: integer
+            default: 20
+      responses:
+        "200":
+          description: Batch listed successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/ListBatchesResponse"
+      x-oaiMeta:
+        name: List batch
+        group: batch
+        returns: A list of paginated [Batch](/docs/api-reference/batch/object) objects.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/batches?limit=2 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              client.batches.list()
+            node: |
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const list = await openai.batches.list();
+
+                for await (const batch of list) {
+                  console.log(batch);
+                }
+              }
+
+              main();
+          response: |
+            {
+              "object": "list",
+              "data": [
+                {
+                  "id": "batch_abc123",
+                  "object": "batch",
+                  "endpoint": "/v1/chat/completions",
+                  "errors": null,
+                  "input_file_id": "file-abc123",
+                  "completion_window": "24h",
+                  "status": "completed",
+                  "output_file_id": "file-cvaTdG",
+                  "error_file_id": "file-HOWS94",
+                  "created_at": 1711471533,
+                  "in_progress_at": 1711471538,
+                  "expires_at": 1711557933,
+                  "finalizing_at": 1711493133,
+                  "completed_at": 1711493163,
+                  "failed_at": null,
+                  "expired_at": null,
+                  "cancelling_at": null,
+                  "cancelled_at": null,
+                  "request_counts": {
+                    "total": 100,
+                    "completed": 95,
+                    "failed": 5
+                  },
+                  "metadata": {
+                    "customer_id": "user_123456789",
+                    "batch_description": "Nightly job"
+                  }
+                },
+                { ...
+                },
+              ],
+              "first_id": "batch_abc123",
+              "last_id": "batch_abc456",
+              "has_more": true
+            }
+
+  /batches/{batch_id}:
+    get:
+      operationId: retrieveBatch
+      tags:
+        - Batch
+      summary: Retrieves a batch.
+      parameters:
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the batch to retrieve.
+      responses:
+        "200":
+          description: Batch retrieved successfully.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Batch"
+      x-oaiMeta:
+        name: Retrieve batch
+        group: batch
+        returns: The [Batch](/docs/api-reference/batch/object) object matching the specified ID.
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/batches/batch_abc123 \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json"
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              client.batches.retrieve("batch_abc123")
+            node: |
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const batch = await openai.batches.retrieve("batch_abc123");
+
+                console.log(batch);
+              }
+
+              main();
+          response: &batch_object |
+            {
+              "id": "batch_abc123",
+              "object": "batch",
+              "endpoint": "/v1/chat/completions",
+              "errors": null,
+              "input_file_id": "file-abc123",
+              "completion_window": "24h",
+              "status": "completed",
+              "output_file_id": "file-cvaTdG",
+              "error_file_id": "file-HOWS94",
+              "created_at": 1711471533,
+              "in_progress_at": 1711471538,
+              "expires_at": 1711557933,
+              "finalizing_at": 1711493133,
+              "completed_at": 1711493163,
+              "failed_at": null,
+              "expired_at": null,
+              "cancelling_at": null,
+              "cancelled_at": null,
+              "request_counts": {
+                "total": 100,
+                "completed": 95,
+                "failed": 5
+              },
+              "metadata": {
+                "customer_id": "user_123456789",
+                "batch_description": "Nightly eval job"
+              }
+            }
+
+  /batches/{batch_id}/cancel:
+    post:
+      operationId: cancelBatch
+      tags:
+        - Batch
+      summary: Cancels an in-progress batch.
+      parameters:
+        - in: path
+          name: batch_id
+          required: true
+          schema:
+            type: string
+          description: The ID of the batch to cancel.
+      responses:
+        "200":
+          description: Batch is cancelling. Returns the cancelling batch's details.
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Batch"
+      x-oaiMeta:
+        name: Cancel batch
+        group: batch
+        returns: The [Batch](/docs/api-reference/batch/object) object matching the specified ID.
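+        # Like run cancellation, this is asynchronous: the batch moves to `cancelling`
+        # and later to `cancelled`. A sketch of confirming the final state (assuming
+        # the `openai` Python client shown in the examples below; the interval is
+        # arbitrary):
+        #
+        #   import time
+        #   batch = client.batches.cancel("batch_abc123")
+        #   while batch.status == "cancelling":
+        #       time.sleep(10)
+        #       batch = client.batches.retrieve("batch_abc123")
+        #   # Requests completed before the cancel landed may still be available in
+        #   # the batch's output file.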
+        examples:
+          request:
+            curl: |
+              curl https://api.openai.com/v1/batches/batch_abc123/cancel \
+                -H "Authorization: Bearer $OPENAI_API_KEY" \
+                -H "Content-Type: application/json" \
+                -X POST
+            python: |
+              from openai import OpenAI
+              client = OpenAI()
+
+              client.batches.cancel("batch_abc123")
+            node: |
+              import OpenAI from "openai";
+
+              const openai = new OpenAI();
+
+              async function main() {
+                const batch = await openai.batches.cancel("batch_abc123");
+
+                console.log(batch);
+              }
+
+              main();
+          response: |
+            {
+              "id": "batch_abc123",
+              "object": "batch",
+              "endpoint": "/v1/chat/completions",
+              "errors": null,
+              "input_file_id": "file-abc123",
+              "completion_window": "24h",
+              "status": "cancelling",
+              "output_file_id": null,
+              "error_file_id": null,
+              "created_at": 1711471533,
+              "in_progress_at": 1711471538,
+              "expires_at": 1711557933,
+              "finalizing_at": null,
+              "completed_at": null,
+              "failed_at": null,
+              "expired_at": null,
+              "cancelling_at": 1711475133,
+              "cancelled_at": null,
+              "request_counts": {
+                "total": 100,
+                "completed": 23,
+                "failed": 1
+              },
+              "metadata": {
+                "customer_id": "user_123456789",
+                "batch_description": "Nightly eval job"
+              }
+            }
+
+components:
+  securitySchemes:
+    ApiKeyAuth:
+      type: http
+      scheme: "bearer"
+
+  schemas:
+    Error:
+      type: object
+      properties:
+        code:
+          type: string
+          nullable: true
+        message:
+          type: string
+          nullable: false
+        param:
+          type: string
+          nullable: true
+        type:
+          type: string
+          nullable: false
+      required:
+        - type
+        - message
+        - param
+        - code
+    ErrorResponse:
+      type: object
+      properties:
+        error:
+          $ref: "#/components/schemas/Error"
+      required:
+        - error
+
+    ListModelsResponse:
+      type: object
+      properties:
+        object:
+          type: string
+          enum: [list]
+        data:
+          type: array
+          items:
+            $ref: "#/components/schemas/Model"
+      required:
+        - object
+        - data
+    DeleteModelResponse:
+      type: object
+      properties:
+        id:
+          type: string
+        deleted:
+          type: boolean
+        object:
+          type: string
+      required:
+        - id
+        - object
+        - deleted
+
+    CreateCompletionRequest:
+      type: object
+      properties:
+        model:
+          description: &model_description |
+            ID of the model to use. You can use the [List models](/docs/api-reference/models/list) API to see all of your available models, or see our [Model overview](/docs/models/overview) for descriptions of them.
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["gpt-3.5-turbo-instruct", "davinci-002", "babbage-002"]
+          x-oaiTypeLabel: string
+        prompt:
+          description: &completions_prompt_description |
+            The prompt(s) to generate completions for, encoded as a string, array of strings, array of tokens, or array of token arrays.
+
+            Note that <|endoftext|> is the document separator that the model sees during training, so if a prompt is not specified the model will generate as if from the beginning of a new document.
+          default: "<|endoftext|>"
+          nullable: true
+          oneOf:
+            - type: string
+              default: ""
+              example: "This is a test."
+            - type: array
+              items:
+                type: string
+                default: ""
+                example: "This is a test."
+            - type: array
+              minItems: 1
+              items:
+                type: integer
+              example: "[1212, 318, 257, 1332, 13]"
+            - type: array
+              minItems: 1
+              items:
+                type: array
+                minItems: 1
+                items:
+                  type: integer
+              example: "[[1212, 318, 257, 1332, 13]]"
+        best_of:
+          type: integer
+          default: 1
+          minimum: 0
+          maximum: 20
+          nullable: true
+          description: &completions_best_of_description |
+            Generates `best_of` completions server-side and returns the "best" (the one with the highest log probability per token). Results cannot be streamed.
+
+            When used with `n`, `best_of` controls the number of candidate completions and `n` specifies how many to return – `best_of` must be greater than `n`.
+
+            **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+        echo:
+          type: boolean
+          default: false
+          nullable: true
+          description: &completions_echo_description >
+            Echo back the prompt in addition to the completion.
+        frequency_penalty:
+          type: number
+          default: 0
+          minimum: -2
+          maximum: 2
+          nullable: true
+          description: &completions_frequency_penalty_description |
+            Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+
+            [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details)
+        logit_bias: &completions_logit_bias
+          type: object
+          x-oaiTypeLabel: map
+          default: null
+          nullable: true
+          additionalProperties:
+            type: integer
+          description: &completions_logit_bias_description |
+            Modify the likelihood of specified tokens appearing in the completion.
+
+            Accepts a JSON object that maps tokens (specified by their token ID in the GPT tokenizer) to an associated bias value from -100 to 100. You can use this [tokenizer tool](/tokenizer?view=bpe) to convert text to token IDs. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
+
+            As an example, you can pass `{"50256": -100}` to prevent the <|endoftext|> token from being generated.
+        logprobs: &completions_logprobs_configuration
+          type: integer
+          minimum: 0
+          maximum: 5
+          default: null
+          nullable: true
+          description: &completions_logprobs_description |
+            Include the log probabilities on the `logprobs` most likely output tokens, as well as the chosen tokens. For example, if `logprobs` is 5, the API will return a list of the 5 most likely tokens. The API will always return the `logprob` of the sampled token, so there may be up to `logprobs+1` elements in the response.
+
+            The maximum value for `logprobs` is 5.
+        max_tokens:
+          type: integer
+          minimum: 0
+          default: 16
+          example: 16
+          nullable: true
+          description: &completions_max_tokens_description |
+            The maximum number of [tokens](/tokenizer) that can be generated in the completion.
+
+            The token count of your prompt plus `max_tokens` cannot exceed the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens.
+        n:
+          type: integer
+          minimum: 1
+          maximum: 128
+          default: 1
+          example: 1
+          nullable: true
+          description: &completions_completions_description |
+            How many completions to generate for each prompt.
+
+            **Note:** Because this parameter generates many completions, it can quickly consume your token quota. Use carefully and ensure that you have reasonable settings for `max_tokens` and `stop`.
+        presence_penalty:
+          type: number
+          default: 0
+          minimum: -2
+          maximum: 2
+          nullable: true
+          description: &completions_presence_penalty_description |
+            Number between -2.0 and 2.0.
Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + seed: &completions_seed_param + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + stop: + description: &completions_stop_description > + Up to 4 sequences where the API will stop generating further tokens. The returned text will not contain the stop sequence. + default: null + nullable: true + oneOf: + - type: string + default: <|endoftext|> + example: "\n" + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + example: '["\n"]' + stream: + description: > + Whether to stream back partial progress. If set, tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + suffix: + description: | + The suffix that comes after a completion of inserted text. + + This parameter is only supported for `gpt-3.5-turbo-instruct`. + default: null + nullable: true + type: string + example: "test." + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: &completions_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &completions_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + user: &end_user_param_configuration + type: string + example: user-1234 + description: | + A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids). + required: + - model + - prompt + + CreateCompletionResponse: + type: object + description: | + Represents a completion response from the API. Note: both the streamed and non-streamed response objects share the same shape (unlike the chat endpoint). + properties: + id: + type: string + description: A unique identifier for the completion. + choices: + type: array + description: The list of completion choices the model generated for the input prompt. 
+ items: + type: object + required: + - finish_reason + - index + - logprobs + - text + properties: + finish_reason: + type: string + description: &completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + or `content_filter` if content was omitted due to a flag from our content filters. + enum: ["stop", "length", "content_filter"] + index: + type: integer + logprobs: + type: object + nullable: true + properties: + text_offset: + type: array + items: + type: integer + token_logprobs: + type: array + items: + type: number + tokens: + type: array + items: + type: string + top_logprobs: + type: array + items: + type: object + additionalProperties: + type: number + text: + type: string + created: + type: integer + description: The Unix timestamp (in seconds) of when the completion was created. + model: + type: string + description: The model used for completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always "text_completion" + enum: [text_completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - id + - object + - created + - model + - choices + x-oaiMeta: + name: The completion object + legacy: true + example: | + { + "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7", + "object": "text_completion", + "created": 1589478378, + "model": "gpt-4-turbo", + "choices": [ + { + "text": "\n\nThis is indeed a test", + "index": 0, + "logprobs": null, + "finish_reason": "length" + } + ], + "usage": { + "prompt_tokens": 5, + "completion_tokens": 7, + "total_tokens": 12 + } + } + + ChatCompletionRequestMessageContentPart: + oneOf: + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartText" + - $ref: "#/components/schemas/ChatCompletionRequestMessageContentPartImage" + x-oaiExpandable: true + + ChatCompletionRequestMessageContentPartImage: + type: object + title: Image content part + properties: + type: + type: string + enum: ["image_url"] + description: The type of the content part. + image_url: + type: object + properties: + url: + type: string + description: Either a URL of the image or the base64 encoded image data. + format: uri + detail: + type: string + description: Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding). + enum: ["auto", "low", "high"] + default: "auto" + required: + - url + required: + - type + - image_url + + ChatCompletionRequestMessageContentPartText: + type: object + title: Text content part + properties: + type: + type: string + enum: ["text"] + description: The type of the content part. + text: + type: string + description: The text content. 
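+    # Taken together, the two content-part schemas above allow a user message whose
+    # `content` is an array mixing text and images. An illustrative request-body
+    # fragment (the URL and prompt text are placeholders):
+    #
+    #   "messages": [{
+    #     "role": "user",
+    #     "content": [
+    #       {"type": "text", "text": "What is in this image?"},
+    #       {"type": "image_url", "image_url": {"url": "https://example.com/cat.png", "detail": "low"}}
+    #     ]
+    #   }]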
+      required:
+        - type
+        - text
+
+    ChatCompletionRequestMessage:
+      oneOf:
+        - $ref: "#/components/schemas/ChatCompletionRequestSystemMessage"
+        - $ref: "#/components/schemas/ChatCompletionRequestUserMessage"
+        - $ref: "#/components/schemas/ChatCompletionRequestAssistantMessage"
+        - $ref: "#/components/schemas/ChatCompletionRequestToolMessage"
+        - $ref: "#/components/schemas/ChatCompletionRequestFunctionMessage"
+      x-oaiExpandable: true
+
+    ChatCompletionRequestSystemMessage:
+      type: object
+      title: System message
+      properties:
+        content:
+          description: The contents of the system message.
+          type: string
+        role:
+          type: string
+          enum: ["system"]
+          description: The role of the messages author, in this case `system`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+      required:
+        - content
+        - role
+
+    ChatCompletionRequestUserMessage:
+      type: object
+      title: User message
+      properties:
+        content:
+          description: |
+            The contents of the user message.
+          oneOf:
+            - type: string
+              description: The text contents of the message.
+              title: Text content
+            - type: array
+              description: An array of content parts with a defined type, each of which can be of type `text` or `image_url` when passing in images. You can pass multiple images by adding multiple `image_url` content parts. Image input is only supported when using the `gpt-4-vision-preview` model.
+              title: Array of content parts
+              items:
+                $ref: "#/components/schemas/ChatCompletionRequestMessageContentPart"
+              minItems: 1
+          x-oaiExpandable: true
+        role:
+          type: string
+          enum: ["user"]
+          description: The role of the messages author, in this case `user`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+      required:
+        - content
+        - role
+
+    ChatCompletionRequestAssistantMessage:
+      type: object
+      title: Assistant message
+      properties:
+        content:
+          nullable: true
+          type: string
+          description: |
+            The contents of the assistant message. Required unless `tool_calls` or `function_call` is specified.
+        role:
+          type: string
+          enum: ["assistant"]
+          description: The role of the messages author, in this case `assistant`.
+        name:
+          type: string
+          description: An optional name for the participant. Provides the model information to differentiate between participants of the same role.
+        tool_calls:
+          $ref: "#/components/schemas/ChatCompletionMessageToolCalls"
+        function_call:
+          type: object
+          deprecated: true
+          description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model."
+          properties:
+            arguments:
+              type: string
+              description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.
+            name:
+              type: string
+              description: The name of the function to call.
+          required:
+            - arguments
+            - name
+      required:
+        - role
+
+    ChatCompletionRequestToolMessage:
+      type: object
+      title: Tool message
+      properties:
+        role:
+          type: string
+          enum: ["tool"]
+          description: The role of the messages author, in this case `tool`.
+        content:
+          type: string
+          description: The contents of the tool message.
+        tool_call_id:
+          type: string
+          description: Tool call that this message is responding to.
+      required:
+        - role
+        - content
+        - tool_call_id
+
+    ChatCompletionRequestFunctionMessage:
+      type: object
+      title: Function message
+      deprecated: true
+      properties:
+        role:
+          type: string
+          enum: ["function"]
+          description: The role of the messages author, in this case `function`.
+        content:
+          nullable: true
+          type: string
+          description: The contents of the function message.
+        name:
+          type: string
+          description: The name of the function to call.
+      required:
+        - role
+        - content
+        - name
+
+    FunctionParameters:
+      type: object
+      description: "The parameters the function accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. \n\nOmitting `parameters` defines a function with an empty parameter list."
+      additionalProperties: true
+
+    ChatCompletionFunctions:
+      type: object
+      deprecated: true
+      properties:
+        description:
+          type: string
+          description: A description of what the function does, used by the model to choose when and how to call the function.
+        name:
+          type: string
+          description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+        parameters:
+          $ref: "#/components/schemas/FunctionParameters"
+      required:
+        - name
+
+    ChatCompletionFunctionCallOption:
+      type: object
+      description: >
+        Specifying a particular function via `{"name": "my_function"}` forces the model to call that function.
+      properties:
+        name:
+          type: string
+          description: The name of the function to call.
+      required:
+        - name
+
+    ChatCompletionTool:
+      type: object
+      properties:
+        type:
+          type: string
+          enum: ["function"]
+          description: The type of the tool. Currently, only `function` is supported.
+        function:
+          $ref: "#/components/schemas/FunctionObject"
+      required:
+        - type
+        - function
+
+    FunctionObject:
+      type: object
+      properties:
+        description:
+          type: string
+          description: A description of what the function does, used by the model to choose when and how to call the function.
+        name:
+          type: string
+          description: The name of the function to be called. Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.
+        parameters:
+          $ref: "#/components/schemas/FunctionParameters"
+      required:
+        - name
+
+    ChatCompletionToolChoiceOption:
+      description: |
+        Controls which (if any) tool is called by the model.
+        `none` means the model will not call any tool and instead generates a message.
+        `auto` means the model can pick between generating a message or calling one or more tools.
+        `required` means the model must call one or more tools.
+        Specifying a particular tool via `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool.
+
+        `none` is the default when no tools are present. `auto` is the default if tools are present.
+      oneOf:
+        - type: string
+          description: >
+            `none` means the model will not call any tool and instead generates a message.
+            `auto` means the model can pick between generating a message or calling one or more tools.
+            `required` means the model must call one or more tools.
+          enum: [none, auto, required]
+        - $ref: "#/components/schemas/ChatCompletionNamedToolChoice"
+      x-oaiExpandable: true
+
+    ChatCompletionNamedToolChoice:
+      type: object
+      description: Specifies a tool the model should use.
Use to force the model to call a specific function. + properties: + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + - function + + ChatCompletionMessageToolCalls: + type: array + description: The tool calls generated by the model, such as function calls. + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCall" + + ChatCompletionMessageToolCall: + type: object + properties: + # TODO: index included when streaming + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + description: The function that the model called. + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - name + - arguments + required: + - id + - type + - function + + ChatCompletionMessageToolCallChunk: + type: object + properties: + index: + type: integer + id: + type: string + description: The ID of the tool call. + type: + type: string + enum: ["function"] + description: The type of the tool. Currently, only `function` is supported. + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + required: + - index + + # Note, this isn't referenced anywhere, but is kept as a convenience to record all possible roles in one place. + ChatCompletionRole: + type: string + description: The role of the author of a message + enum: + - system + - user + - assistant + - tool + - function + + ChatCompletionStreamOptions: + description: | + Options for streaming response. Only set this when you set `stream: true`. + type: object + nullable: true + default: null + properties: + include_usage: + type: boolean + description: | + If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value. + + ChatCompletionResponseMessage: + type: object + description: A chat completion message generated by the model. + properties: + content: + type: string + description: The contents of the message. + nullable: true + tool_calls: + $ref: "#/components/schemas/ChatCompletionMessageToolCalls" + role: + type: string + enum: ["assistant"] + description: The role of the author of this message. + function_call: + type: object + deprecated: true + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." 
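+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. How an assistant message carrying a ChatCompletionMessageToolCall is answered
+    # by a tool message on the next turn; the call ID, function name, and weather payload
+    # are placeholders.
+    #
+    #   assistant_turn = {
+    #       "role": "assistant",
+    #       "content": None,
+    #       "tool_calls": [{
+    #           "id": "call_abc123",
+    #           "type": "function",
+    #           "function": {"name": "get_weather", "arguments": "{\"city\": \"Oslo\"}"},
+    #       }],
+    #   }
+    #   tool_turn = {"role": "tool", "tool_call_id": "call_abc123", "content": "12°C, overcast"}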
+ properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + required: + - name + - arguments + required: + - role + - content + + ChatCompletionStreamResponseDelta: + type: object + description: A chat completion delta generated by streamed model responses. + properties: + content: + type: string + description: The contents of the chunk message. + nullable: true + function_call: + deprecated: true + type: object + description: "Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model." + properties: + arguments: + type: string + description: The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function. + name: + type: string + description: The name of the function to call. + tool_calls: + type: array + items: + $ref: "#/components/schemas/ChatCompletionMessageToolCallChunk" + role: + type: string + enum: ["system", "user", "assistant", "tool"] + description: The role of the author of this message. + + CreateChatCompletionRequest: + type: object + properties: + messages: + description: A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models). + type: array + minItems: 1 + items: + $ref: "#/components/schemas/ChatCompletionRequestMessage" + model: + description: ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API. + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0301", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + frequency_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_frequency_penalty_description + logit_bias: + type: object + x-oaiTypeLabel: map + default: null + nullable: true + additionalProperties: + type: integer + description: | + Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. + logprobs: + description: Whether to return log probabilities of the output tokens or not. 
If true, returns the log probabilities of each output token returned in the `content` of `message`. + type: boolean + default: false + nullable: true + top_logprobs: + description: An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used. + type: integer + minimum: 0 + maximum: 20 + nullable: true + max_tokens: + description: | + The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + type: integer + nullable: true + n: + type: integer + minimum: 1 + maximum: 128 + default: 1 + example: 1 + nullable: true + description: How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs. + presence_penalty: + type: number + default: 0 + minimum: -2 + maximum: 2 + nullable: true + description: *completions_presence_penalty_description + response_format: + type: object + description: | + An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + properties: + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. + seed: + type: integer + minimum: -9223372036854775808 + maximum: 9223372036854775807 + nullable: true + description: | + This feature is in Beta. + If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + x-oaiMeta: + beta: true + stop: + description: | + Up to 4 sequences where the API will stop generating further tokens. + default: null + oneOf: + - type: string + nullable: true + - type: array + minItems: 1 + maxItems: 4 + items: + type: string + stream: + description: > + If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). 
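+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. A minimal CreateChatCompletionRequest body (as a Python dict) with JSON mode
+    # enabled; per the response_format description above, the system message must itself
+    # ask for JSON. The model name and prompts are placeholders.
+    #
+    #   request_body = {
+    #       "model": "gpt-3.5-turbo-0125",
+    #       "response_format": {"type": "json_object"},
+    #       "seed": 42,
+    #       "messages": [
+    #           {"role": "system", "content": "Reply with a JSON object containing an `answer` key."},
+    #           {"role": "user", "content": "What is 2 + 2?"},
+    #       ],
+    #   }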
+ type: boolean + nullable: true + default: false + stream_options: + $ref: "#/components/schemas/ChatCompletionStreamOptions" + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *completions_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *completions_top_p_description + tools: + type: array + description: > + A list of tools the model may call. Currently, only functions are supported as a tool. + Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported. + items: + $ref: "#/components/schemas/ChatCompletionTool" + tool_choice: + $ref: "#/components/schemas/ChatCompletionToolChoiceOption" + user: *end_user_param_configuration + function_call: + deprecated: true + description: | + Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{"name": "my_function"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + oneOf: + - type: string + description: > + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + enum: [none, auto] + - $ref: "#/components/schemas/ChatCompletionFunctionCallOption" + x-oaiExpandable: true + functions: + deprecated: true + description: | + Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + type: array + minItems: 1 + maxItems: 128 + items: + $ref: "#/components/schemas/ChatCompletionFunctions" + + required: + - model + - messages + + CreateChatCompletionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: &chat_completion_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + logprobs: &chat_completion_response_logprobs + description: Log probability information for the choice. + type: object + nullable: true + properties: + content: + description: A list of message content tokens with log probability information. 
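+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. Declaring a tool via `tools` (a ChatCompletionTool wrapping a FunctionObject)
+    # and forcing it with the named `tool_choice` form quoted above; the function name and
+    # its JSON Schema are placeholders.
+    #
+    #   tools = [{
+    #       "type": "function",
+    #       "function": {
+    #           "name": "get_weather",
+    #           "description": "Look up the current weather for a city.",
+    #           "parameters": {
+    #               "type": "object",
+    #               "properties": {"city": {"type": "string"}},
+    #               "required": ["city"],
+    #           },
+    #       },
+    #   }]
+    #   tool_choice = {"type": "function", "function": {"name": "get_weather"}}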
+ type: array + items: + $ref: "#/components/schemas/ChatCompletionTokenLogprob" + nullable: true + required: + - content + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_example + + CreateChatCompletionFunctionResponse: + type: object + description: Represents a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. + choices: + type: array + description: A list of chat completion choices. Can be more than one if `n` is greater than 1. + items: + type: object + required: + - finish_reason + - index + - message + - logprobs + properties: + finish_reason: + type: string + description: + &chat_completion_function_finish_reason_description | + The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, `length` if the maximum number of tokens specified in the request was reached, `content_filter` if content was omitted due to a flag from our content filters, or `function_call` if the model called a function. + enum: ["stop", "length", "function_call", "content_filter"] + index: + type: integer + description: The index of the choice in the list of choices. + message: + $ref: "#/components/schemas/ChatCompletionResponseMessage" + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. + model: + type: string + description: The model used for the chat completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion`. + enum: [chat.completion] + usage: + $ref: "#/components/schemas/CompletionUsage" + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion object + group: chat + example: *chat_completion_function_example + + ChatCompletionTokenLogprob: + type: object + properties: + token: &chat_completion_response_logprobs_token + description: The token. + type: string + logprob: &chat_completion_response_logprobs_token_logprob + description: The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely. + type: number + bytes: &chat_completion_response_logprobs_bytes + description: A list of integers representing the UTF-8 bytes representation of the token. 
Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token. + type: array + items: + type: integer + nullable: true + top_logprobs: + description: List of the most likely tokens and their log probability, at this token position. In rare cases, there may be fewer than the number of requested `top_logprobs` returned. + type: array + items: + type: object + properties: + token: *chat_completion_response_logprobs_token + logprob: *chat_completion_response_logprobs_token_logprob + bytes: *chat_completion_response_logprobs_bytes + required: + - token + - logprob + - bytes + required: + - token + - logprob + - bytes + - top_logprobs + + ListPaginatedFineTuningJobsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJob" + has_more: + type: boolean + object: + type: string + enum: [list] + required: + - object + - data + - has_more + + CreateChatCompletionStreamResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + properties: + id: + type: string + description: A unique identifier for the chat completion. Each chunk has the same ID. + choices: + type: array + description: | + A list of chat completion choices. Can contain more than one elements if `n` is greater than 1. Can also be empty for the + last chunk if you set `stream_options: {"include_usage": true}`. + items: + type: object + required: + - delta + - finish_reason + - index + properties: + delta: + $ref: "#/components/schemas/ChatCompletionStreamResponseDelta" + logprobs: *chat_completion_response_logprobs + finish_reason: + type: string + description: *chat_completion_finish_reason_description + enum: + [ + "stop", + "length", + "tool_calls", + "content_filter", + "function_call", + ] + nullable: true + index: + type: integer + description: The index of the choice in the list of choices. + created: + type: integer + description: The Unix timestamp (in seconds) of when the chat completion was created. Each chunk has the same timestamp. + model: + type: string + description: The model to generate the completion. + system_fingerprint: + type: string + description: | + This fingerprint represents the backend configuration that the model runs with. + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. + object: + type: string + description: The object type, which is always `chat.completion.chunk`. + enum: [chat.completion.chunk] + usage: + type: object + description: | + An optional field that will only be present when you set `stream_options: {"include_usage": true}` in your request. + When present, it contains a null value except for the last chunk which contains the token usage statistics for the entire request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). 
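+    # Editor's note: illustrative sketch only, not part of this patch. Consuming the chunks
+    # described by CreateChatCompletionStreamResponse using the separate `openai` Python
+    # package (an assumption; any HTTP client that reads server-sent events works the same
+    # way). With `include_usage` set, every chunk's `usage` is null except the final one.
+    #
+    #   from openai import OpenAI
+    #
+    #   client = OpenAI()
+    #   stream = client.chat.completions.create(
+    #       model="gpt-3.5-turbo",
+    #       messages=[{"role": "user", "content": "Hello!"}],
+    #       stream=True,
+    #       stream_options={"include_usage": True},
+    #   )
+    #   for chunk in stream:
+    #       if chunk.choices:
+    #           print(chunk.choices[0].delta.content or "", end="")
+    #       elif chunk.usage:
+    #           print("\ntotal tokens:", chunk.usage.total_tokens)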
+ required: + - prompt_tokens + - completion_tokens + - total_tokens + required: + - choices + - created + - id + - model + - object + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_chunk_example + + CreateChatCompletionImageResponse: + type: object + description: Represents a streamed chunk of a chat completion response returned by model, based on the provided input. + x-oaiMeta: + name: The chat completion chunk object + group: chat + example: *chat_completion_image_example + + CreateImageRequest: + type: object + properties: + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters for `dall-e-2` and 4000 characters for `dall-e-3`. + type: string + example: "A cute baby sea otter" + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2", "dall-e-3"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-3" + nullable: true + description: The model to use for image generation. + n: &images_n + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. For `dall-e-3`, only `n=1` is supported. + quality: + type: string + enum: ["standard", "hd"] + default: "standard" + example: "standard" + description: The quality of the image that will be generated. `hd` creates images with finer details and greater consistency across the image. This param is only supported for `dall-e-3`. + response_format: &images_response_format + type: string + enum: ["url", "b64_json"] + default: "url" + example: "url" + nullable: true + description: The format in which the generated images are returned. Must be one of `url` or `b64_json`. URLs are only valid for 60 minutes after the image has been generated. + size: &images_size + type: string + enum: ["256x256", "512x512", "1024x1024", "1792x1024", "1024x1792"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024` for `dall-e-2`. Must be one of `1024x1024`, `1792x1024`, or `1024x1792` for `dall-e-3` models. + style: + type: string + enum: ["vivid", "natural"] + default: "vivid" + example: "vivid" + nullable: true + description: The style of the generated images. Must be one of `vivid` or `natural`. Vivid causes the model to lean towards generating hyper-real and dramatic images. Natural causes the model to produce more natural, less hyper-real looking images. This param is only supported for `dall-e-3`. + user: *end_user_param_configuration + required: + - prompt + + ImagesResponse: + properties: + created: + type: integer + data: + type: array + items: + $ref: "#/components/schemas/Image" + required: + - created + - data + + Image: + type: object + description: Represents the url or the content of an image generated by the OpenAI API. + properties: + b64_json: + type: string + description: The base64-encoded JSON of the generated image, if `response_format` is `b64_json`. + url: + type: string + description: The URL of the generated image, if `response_format` is `url` (default). + revised_prompt: + type: string + description: The prompt that was used to generate the image, if there was any revision to the prompt. + x-oaiMeta: + name: The image object + example: | + { + "url": "...", + "revised_prompt": "..." + } + + CreateImageEditRequest: + type: object + properties: + image: + description: The image to edit. 
Must be a valid PNG file, less than 4MB, and square. If mask is not provided, image must have transparency, which will be used as the mask. + type: string + format: binary + prompt: + description: A text description of the desired image(s). The maximum length is 1000 characters. + type: string + example: "A cute baby sea otter wearing a beret" + mask: + description: An additional image whose fully transparent areas (e.g. where alpha is zero) indicate where `image` should be edited. Must be a valid PNG file, less than 4MB, and have the same dimensions as `image`. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: + type: integer + minimum: 1 + maximum: 10 + default: 1 + example: 1 + nullable: true + description: The number of images to generate. Must be between 1 and 10. + size: &dalle2_images_size + type: string + enum: ["256x256", "512x512", "1024x1024"] + default: "1024x1024" + example: "1024x1024" + nullable: true + description: The size of the generated images. Must be one of `256x256`, `512x512`, or `1024x1024`. + response_format: *images_response_format + user: *end_user_param_configuration + required: + - prompt + - image + + CreateImageVariationRequest: + type: object + properties: + image: + description: The image to use as the basis for the variation(s). Must be a valid PNG file, less than 4MB, and square. + type: string + format: binary + model: + anyOf: + - type: string + - type: string + enum: ["dall-e-2"] + x-oaiTypeLabel: string + default: "dall-e-2" + example: "dall-e-2" + nullable: true + description: The model to use for image generation. Only `dall-e-2` is supported at this time. + n: *images_n + response_format: *images_response_format + size: *dalle2_images_size + user: *end_user_param_configuration + required: + - image + + CreateModerationRequest: + type: object + properties: + input: + description: The input text to classify + oneOf: + - type: string + default: "" + example: "I want to kill them." + - type: array + items: + type: string + default: "" + example: "I want to kill them." + model: + description: | + Two content moderations models are available: `text-moderation-stable` and `text-moderation-latest`. + + The default is `text-moderation-latest` which will be automatically upgraded over time. This ensures you are always using our most accurate model. If you use `text-moderation-stable`, we will provide advanced notice before updating the model. Accuracy of `text-moderation-stable` may be slightly lower than for `text-moderation-latest`. + nullable: false + default: "text-moderation-latest" + example: "text-moderation-stable" + anyOf: + - type: string + - type: string + enum: ["text-moderation-latest", "text-moderation-stable"] + x-oaiTypeLabel: string + required: + - input + + CreateModerationResponse: + type: object + description: Represents if a given text input is potentially harmful. + properties: + id: + type: string + description: The unique identifier for the moderation request. + model: + type: string + description: The model used to generate the moderation results. + results: + type: array + description: A list of moderation objects. + items: + type: object + properties: + flagged: + type: boolean + description: Whether any of the below categories are flagged. 
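+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. A CreateImageRequest body (schema defined above) exercising the dall-e-3-only
+    # quality and style options; the prompt is a placeholder.
+    #
+    #   image_request = {
+    #       "model": "dall-e-3",
+    #       "prompt": "A cute baby sea otter wearing a beret",
+    #       "n": 1,
+    #       "size": "1792x1024",
+    #       "quality": "hd",
+    #       "style": "natural",
+    #       "response_format": "b64_json",
+    #   }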
+ categories: + type: object + description: A list of the categories, and whether they are flagged or not. + properties: + hate: + type: boolean + description: Content that expresses, incites, or promotes hate based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. Hateful content aimed at non-protected groups (e.g., chess players) is harassment. + hate/threatening: + type: boolean + description: Hateful content that also includes violence or serious harm towards the targeted group based on race, gender, ethnicity, religion, nationality, sexual orientation, disability status, or caste. + harassment: + type: boolean + description: Content that expresses, incites, or promotes harassing language towards any target. + harassment/threatening: + type: boolean + description: Harassment content that also includes violence or serious harm towards any target. + self-harm: + type: boolean + description: Content that promotes, encourages, or depicts acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/intent: + type: boolean + description: Content where the speaker expresses that they are engaging or intend to engage in acts of self-harm, such as suicide, cutting, and eating disorders. + self-harm/instructions: + type: boolean + description: Content that encourages performing acts of self-harm, such as suicide, cutting, and eating disorders, or that gives instructions or advice on how to commit such acts. + sexual: + type: boolean + description: Content meant to arouse sexual excitement, such as the description of sexual activity, or that promotes sexual services (excluding sex education and wellness). + sexual/minors: + type: boolean + description: Sexual content that includes an individual who is under 18 years old. + violence: + type: boolean + description: Content that depicts death, violence, or physical injury. + violence/graphic: + type: boolean + description: Content that depicts death, violence, or physical injury in graphic detail. + required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + category_scores: + type: object + description: A list of the categories along with their scores as predicted by model. + properties: + hate: + type: number + description: The score for the category 'hate'. + hate/threatening: + type: number + description: The score for the category 'hate/threatening'. + harassment: + type: number + description: The score for the category 'harassment'. + harassment/threatening: + type: number + description: The score for the category 'harassment/threatening'. + self-harm: + type: number + description: The score for the category 'self-harm'. + self-harm/intent: + type: number + description: The score for the category 'self-harm/intent'. + self-harm/instructions: + type: number + description: The score for the category 'self-harm/instructions'. + sexual: + type: number + description: The score for the category 'sexual'. + sexual/minors: + type: number + description: The score for the category 'sexual/minors'. + violence: + type: number + description: The score for the category 'violence'. + violence/graphic: + type: number + description: The score for the category 'violence/graphic'. 
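+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. A CreateModerationRequest body and the fields to read back from each entry in
+    # `results`; the input string is the spec's own example.
+    #
+    #   moderation_request = {"model": "text-moderation-latest", "input": "I want to kill them."}
+    #   # For each result: result["flagged"], result["categories"]["violence"],
+    #   # result["category_scores"]["violence"], and so on for the other categories.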
+ required: + - hate + - hate/threatening + - harassment + - harassment/threatening + - self-harm + - self-harm/intent + - self-harm/instructions + - sexual + - sexual/minors + - violence + - violence/graphic + required: + - flagged + - categories + - category_scores + required: + - id + - model + - results + x-oaiMeta: + name: The moderation object + example: *moderation_example + + ListFilesResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/OpenAIFile" + object: + type: string + enum: [list] + required: + - object + - data + + CreateFileRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The File object (not file name) to be uploaded. + type: string + format: binary + purpose: + description: | + The intended purpose of the uploaded file. + + Use "fine-tune" for [Fine-tuning](/docs/api-reference/fine-tuning) and "assistants" for [Assistants](/docs/api-reference/assistants) and [Messages](/docs/api-reference/messages). This allows us to validate the format of the uploaded file is correct for fine-tuning. + type: string + enum: ["fine-tune", "assistants"] + required: + - file + - purpose + + DeleteFileResponse: + type: object + properties: + id: + type: string + object: + type: string + enum: [file] + deleted: + type: boolean + required: + - id + - object + - deleted + + CreateFineTuningJobRequest: + type: object + properties: + model: + description: | + The name of the model to fine-tune. You can select one of the + [supported models](/docs/guides/fine-tuning/what-models-can-be-fine-tuned). + example: "gpt-3.5-turbo" + anyOf: + - type: string + - type: string + enum: ["babbage-002", "davinci-002", "gpt-3.5-turbo"] + x-oaiTypeLabel: string + training_file: + description: | + The ID of an uploaded file that contains training data. + + See [upload file](/docs/api-reference/files/create) for how to upload a file. + + Your dataset must be formatted as a JSONL file. Additionally, you must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + example: "file-abc123" + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. + properties: + batch_size: + description: | + Number of examples in each batch. A larger batch size means that model parameters + are updated less frequently, but with lower variance. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 256 + default: auto + learning_rate_multiplier: + description: | + Scaling factor for the learning rate. A smaller learning rate may be useful to avoid + overfitting. + oneOf: + - type: string + enum: [auto] + - type: number + minimum: 0 + exclusiveMinimum: true + default: auto + n_epochs: + description: | + The number of epochs to train the model for. An epoch refers to one full cycle + through the training dataset. + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + suffix: + description: | + A string of up to 18 characters that will be added to your fine-tuned model name. + + For example, a `suffix` of "custom-model-name" would produce a model name like `ft:gpt-3.5-turbo:openai:custom-model-name:7p4lURel`. + type: string + minLength: 1 + maxLength: 40 + default: null + nullable: true + validation_file: + description: | + The ID of an uploaded file that contains validation data. 
+ + If you provide this file, the data is used to generate validation + metrics periodically during fine-tuning. These metrics can be viewed in + the fine-tuning results file. + The same data should not be present in both train and validation files. + + Your dataset must be formatted as a JSONL file. You must upload your file with the purpose `fine-tune`. + + See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + type: string + nullable: true + example: "file-abc123" + integrations: + type: array + description: A list of integrations to enable for your fine-tuning job. + nullable: true + items: + type: object + required: + - type + - wandb + properties: + type: + description: | + The type of integration to enable. Currently, only "wandb" (Weights and Biases) is supported. + oneOf: + - type: string + enum: [wandb] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + + seed: + description: | + The seed controls the reproducibility of the job. Passing in the same seed and job parameters should produce the same results, but may differ in rare cases. + If a seed is not specified, one will be generated for you. + type: integer + nullable: true + minimum: 0 + maximum: 2147483647 + example: 42 + required: + - model + - training_file + + ListFineTuningJobEventsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobEvent" + object: + type: string + enum: [list] + required: + - object + - data + + ListFineTuningJobCheckpointsResponse: + type: object + properties: + data: + type: array + items: + $ref: "#/components/schemas/FineTuningJobCheckpoint" + object: + type: string + enum: [list] + first_id: + type: string + nullable: true + last_id: + type: string + nullable: true + has_more: + type: boolean + required: + - object + - data + - has_more + + CreateEmbeddingRequest: + type: object + additionalProperties: false + properties: + input: + description: | + Input text to embed, encoded as a string or array of tokens. To embed multiple inputs in a single request, pass an array of strings or array of token arrays. The input must not exceed the max input tokens for the model (8192 tokens for `text-embedding-ada-002`), cannot be an empty string, and any array must be 2048 dimensions or less. 
[Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + example: "The quick brown fox jumped over the lazy dog" + oneOf: + - type: string + title: string + description: The string that will be turned into an embedding. + default: "" + example: "This is a test." + - type: array + title: array + description: The array of strings that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: string + default: "" + example: "['This is a test.']" + - type: array + title: array + description: The array of integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: integer + example: "[1212, 318, 257, 1332, 13]" + - type: array + title: array + description: The array of arrays containing integers that will be turned into an embedding. + minItems: 1 + maxItems: 2048 + items: + type: array + minItems: 1 + items: + type: integer + example: "[[1212, 318, 257, 1332, 13]]" + x-oaiExpandable: true + model: + description: *model_description + example: "text-embedding-3-small" + anyOf: + - type: string + - type: string + enum: + [ + "text-embedding-ada-002", + "text-embedding-3-small", + "text-embedding-3-large", + ] + x-oaiTypeLabel: string + encoding_format: + description: "The format to return the embeddings in. Can be either `float` or [`base64`](https://pypi.org/project/pybase64/)." + example: "float" + default: "float" + type: string + enum: ["float", "base64"] + dimensions: + description: | + The number of dimensions the resulting output embeddings should have. Only supported in `text-embedding-3` and later models. + type: integer + minimum: 1 + user: *end_user_param_configuration + required: + - model + - input + + CreateEmbeddingResponse: + type: object + properties: + data: + type: array + description: The list of embeddings generated by the model. + items: + $ref: "#/components/schemas/Embedding" + model: + type: string + description: The name of the model used to generate the embedding. + object: + type: string + description: The object type, which is always "list". + enum: [list] + usage: + type: object + description: The usage information for the request. + properties: + prompt_tokens: + type: integer + description: The number of tokens used by the prompt. + total_tokens: + type: integer + description: The total number of tokens used by the request. + required: + - prompt_tokens + - total_tokens + required: + - object + - model + - data + - usage + + CreateTranscriptionRequest: + type: object + additionalProperties: false + properties: + file: + description: | + The audio file object (not file name) to transcribe, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm. + type: string + x-oaiTypeLabel: file + format: binary + model: + description: | + ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available. + example: whisper-1 + anyOf: + - type: string + - type: string + enum: ["whisper-1"] + x-oaiTypeLabel: string + language: + description: | + The language of the input audio. Supplying the input language in [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format will improve accuracy and latency. + type: string + prompt: + description: | + An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should match the audio language. 
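+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. A CreateEmbeddingRequest body (schema defined above) using the optional
+    # `dimensions` and `encoding_format` fields; the input sentence is the spec's own
+    # example.
+    #
+    #   embedding_request = {
+    #       "model": "text-embedding-3-small",
+    #       "input": ["The quick brown fox jumped over the lazy dog"],
+    #       "dimensions": 256,
+    #       "encoding_format": "float",
+    #   }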
+ type: string + response_format: + description: | + The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`. + type: string + enum: + - json + - text + - srt + - verbose_json + - vtt + default: json + temperature: + description: | + The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit. + type: number + default: 0 + timestamp_granularities[]: + description: | + The timestamp granularities to populate for this transcription. `response_format` must be set `verbose_json` to use timestamp granularities. Either or both of these options are supported: `word`, or `segment`. Note: There is no additional latency for segment timestamps, but generating word timestamps incurs additional latency. + type: array + items: + type: string + enum: + - word + - segment + default: [segment] + required: + - file + - model + + # Note: This does not currently support the non-default response format types. + CreateTranscriptionResponseJson: + type: object + description: Represents a transcription response returned by model, based on the provided input. + properties: + text: + type: string + description: The transcribed text. + required: + - text + x-oaiMeta: + name: The transcription object + group: audio + example: *basic_transcription_response_example + + TranscriptionSegment: + type: object + properties: + id: + type: integer + description: Unique identifier of the segment. + seek: + type: integer + description: Seek offset of the segment. + start: + type: number + format: float + description: Start time of the segment in seconds. + end: + type: number + format: float + description: End time of the segment in seconds. + text: + type: string + description: Text content of the segment. + tokens: + type: array + items: + type: integer + description: Array of token IDs for the text content. + temperature: + type: number + format: float + description: Temperature parameter used for generating the segment. + avg_logprob: + type: number + format: float + description: Average logprob of the segment. If the value is lower than -1, consider the logprobs failed. + compression_ratio: + type: number + format: float + description: Compression ratio of the segment. If the value is greater than 2.4, consider the compression failed. + no_speech_prob: + type: number + format: float + description: Probability of no speech in the segment. If the value is higher than 1.0 and the `avg_logprob` is below -1, consider this segment silent. + required: + - id + - seek + - start + - end + - text + - tokens + - temperature + - avg_logprob + - compression_ratio + - no_speech_prob + + TranscriptionWord: + type: object + properties: + word: + type: string + description: The text content of the word. + start: + type: number + format: float + description: Start time of the word in seconds. + end: + type: number + format: float + description: End time of the word in seconds. + required: [word, start, end] + + CreateTranscriptionResponseVerboseJson: + type: object + description: Represents a verbose json transcription response returned by model, based on the provided input. + properties: + language: + type: string + description: The language of the input audio. 
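+    # Editor's note: illustrative sketch only, not part of this patch. Requesting a
+    # verbose_json transcript with the word-level timestamps described above, via the
+    # separate `openai` Python package (an assumption); the file path is a placeholder.
+    # The CreateTranscriptionResponseVerboseJson object defined just below carries the
+    # resulting `words` and `segments` arrays.
+    #
+    #   from openai import OpenAI
+    #
+    #   client = OpenAI()
+    #   with open("meeting.mp3", "rb") as audio:
+    #       transcript = client.audio.transcriptions.create(
+    #           model="whisper-1",
+    #           file=audio,
+    #           response_format="verbose_json",
+    #           timestamp_granularities=["word", "segment"],
+    #       )
+    #   print(transcript.text)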
+        duration:
+          type: string
+          description: The duration of the input audio.
+        text:
+          type: string
+          description: The transcribed text.
+        words:
+          type: array
+          description: Extracted words and their corresponding timestamps.
+          items:
+            $ref: "#/components/schemas/TranscriptionWord"
+        segments:
+          type: array
+          description: Segments of the transcribed text and their corresponding details.
+          items:
+            $ref: "#/components/schemas/TranscriptionSegment"
+      required: [language, duration, text]
+      x-oaiMeta:
+        name: The transcription object
+        group: audio
+        example: *verbose_transcription_response_example
+
+    CreateTranslationRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file:
+          description: |
+            The audio file object (not file name) to translate, in one of these formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
+          type: string
+          x-oaiTypeLabel: file
+          format: binary
+        model:
+          description: |
+            ID of the model to use. Only `whisper-1` (which is powered by our open source Whisper V2 model) is currently available.
+          example: whisper-1
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["whisper-1"]
+          x-oaiTypeLabel: string
+        prompt:
+          description: |
+            An optional text to guide the model's style or continue a previous audio segment. The [prompt](/docs/guides/speech-to-text/prompting) should be in English.
+          type: string
+        response_format:
+          description: |
+            The format of the transcript output, in one of these options: `json`, `text`, `srt`, `verbose_json`, or `vtt`.
+          type: string
+          default: json
+        temperature:
+          description: |
+            The sampling temperature, between 0 and 1. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. If set to 0, the model will use [log probability](https://en.wikipedia.org/wiki/Log_probability) to automatically increase the temperature until certain thresholds are hit.
+          type: number
+          default: 0
+      required:
+        - file
+        - model
+
+    # Note: This does not currently support the non-default response format types.
+    CreateTranslationResponseJson:
+      type: object
+      properties:
+        text:
+          type: string
+      required:
+        - text
+
+    CreateTranslationResponseVerboseJson:
+      type: object
+      properties:
+        language:
+          type: string
+          description: The language of the output translation (always `english`).
+        duration:
+          type: string
+          description: The duration of the input audio.
+        text:
+          type: string
+          description: The translated text.
+        segments:
+          type: array
+          description: Segments of the translated text and their corresponding details.
+          items:
+            $ref: "#/components/schemas/TranscriptionSegment"
+      required: [language, duration, text]
+
+    CreateSpeechRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        model:
+          description: |
+            One of the available [TTS models](/docs/models/tts): `tts-1` or `tts-1-hd`.
+          anyOf:
+            - type: string
+            - type: string
+              enum: ["tts-1", "tts-1-hd"]
+          x-oaiTypeLabel: string
+        input:
+          type: string
+          description: The text to generate audio for. The maximum length is 4096 characters.
+          maxLength: 4096
+        voice:
+          description: The voice to use when generating the audio. Supported voices are `alloy`, `echo`, `fable`, `onyx`, `nova`, and `shimmer`. Previews of the voices are available in the [Text to speech guide](/docs/guides/text-to-speech/voice-options).
+          type: string
+          enum: ["alloy", "echo", "fable", "onyx", "nova", "shimmer"]
+        response_format:
+          description: "The format to return audio in. Supported formats are `mp3`, `opus`, `aac`, `flac`, `wav`, and `pcm`."
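+    # Editor's note: illustrative sketch only, not part of the upstream schema or this
+    # patch. A CreateSpeechRequest body combining the model, voice, and output-format
+    # options above; the input sentence is a placeholder.
+    #
+    #   speech_request = {
+    #       "model": "tts-1-hd",
+    #       "input": "The quick brown fox jumped over the lazy dog.",
+    #       "voice": "nova",
+    #       "response_format": "flac",
+    #   }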
+ default: "mp3" + type: string + enum: ["mp3", "opus", "aac", "flac", "wav", "pcm"] + speed: + description: "The speed of the generated audio. Select a value from `0.25` to `4.0`. `1.0` is the default." + type: number + default: 1.0 + minimum: 0.25 + maximum: 4.0 + required: + - model + - input + - voice + + Model: + title: Model + description: Describes an OpenAI model offering that can be used with the API. + properties: + id: + type: string + description: The model identifier, which can be referenced in the API endpoints. + created: + type: integer + description: The Unix timestamp (in seconds) when the model was created. + object: + type: string + description: The object type, which is always "model". + enum: [model] + owned_by: + type: string + description: The organization that owns the model. + required: + - id + - object + - created + - owned_by + x-oaiMeta: + name: The model object + example: *retrieve_model_response + + OpenAIFile: + title: OpenAIFile + description: The `File` object represents a document that has been uploaded to OpenAI. + properties: + id: + type: string + description: The file identifier, which can be referenced in the API endpoints. + bytes: + type: integer + description: The size of the file, in bytes. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the file was created. + filename: + type: string + description: The name of the file. + object: + type: string + description: The object type, which is always `file`. + enum: ["file"] + purpose: + type: string + description: The intended purpose of the file. Supported values are `fine-tune`, `fine-tune-results`, `assistants`, and `assistants_output`. + enum: + [ + "fine-tune", + "fine-tune-results", + "assistants", + "assistants_output", + ] + status: + type: string + deprecated: true + description: Deprecated. The current status of the file, which can be either `uploaded`, `processed`, or `error`. + enum: ["uploaded", "processed", "error"] + status_details: + type: string + deprecated: true + description: Deprecated. For details on why a fine-tuning training file failed validation, see the `error` field on `fine_tuning.job`. + required: + - id + - object + - bytes + - created_at + - filename + - purpose + - status + x-oaiMeta: + name: The file object + example: | + { + "id": "file-abc123", + "object": "file", + "bytes": 120000, + "created_at": 1677610602, + "filename": "salesOverview.pdf", + "purpose": "assistants", + } + Embedding: + type: object + description: | + Represents an embedding vector returned by embedding endpoint. + properties: + index: + type: integer + description: The index of the embedding in the list of embeddings. + embedding: + type: array + description: | + The embedding vector, which is a list of floats. The length of vector depends on the model as listed in the [embedding guide](/docs/guides/embeddings). + items: + type: number + object: + type: string + description: The object type, which is always "embedding". + enum: [embedding] + required: + - index + - object + - embedding + x-oaiMeta: + name: The embedding object + example: | + { + "object": "embedding", + "embedding": [ + 0.0023064255, + -0.009327292, + .... (1536 floats total for ada-002) + -0.0028842222, + ], + "index": 0 + } + + FineTuningJob: + type: object + title: FineTuningJob + description: | + The `fine_tuning.job` object represents a fine-tuning job that has been created through the API. 
+ properties: + id: + type: string + description: The object identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the fine-tuning job was created. + error: + type: object + nullable: true + description: For fine-tuning jobs that have `failed`, this will contain more information on the cause of the failure. + properties: + code: + type: string + description: A machine-readable error code. + message: + type: string + description: A human-readable error message. + param: + type: string + description: The parameter that was invalid, usually `training_file` or `validation_file`. This field will be null if the failure was not parameter-specific. + nullable: true + required: + - code + - message + - param + fine_tuned_model: + type: string + nullable: true + description: The name of the fine-tuned model that is being created. The value will be null if the fine-tuning job is still running. + finished_at: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job was finished. The value will be null if the fine-tuning job is still running. + hyperparameters: + type: object + description: The hyperparameters used for the fine-tuning job. See the [fine-tuning guide](/docs/guides/fine-tuning) for more details. + properties: + n_epochs: + oneOf: + - type: string + enum: [auto] + - type: integer + minimum: 1 + maximum: 50 + default: auto + description: + The number of epochs to train the model for. An epoch refers to one full cycle through the training dataset. + + "auto" decides the optimal number of epochs based on the size of the dataset. If setting the number manually, we support any number between 1 and 50 epochs. + required: + - n_epochs + model: + type: string + description: The base model that is being fine-tuned. + object: + type: string + description: The object type, which is always "fine_tuning.job". + enum: [fine_tuning.job] + organization_id: + type: string + description: The organization that owns the fine-tuning job. + result_files: + type: array + description: The compiled results file ID(s) for the fine-tuning job. You can retrieve the results with the [Files API](/docs/api-reference/files/retrieve-contents). + items: + type: string + example: file-abc123 + status: + type: string + description: The current status of the fine-tuning job, which can be either `validating_files`, `queued`, `running`, `succeeded`, `failed`, or `cancelled`. + enum: + [ + "validating_files", + "queued", + "running", + "succeeded", + "failed", + "cancelled", + ] + trained_tokens: + type: integer + nullable: true + description: The total number of billable tokens processed by this fine-tuning job. The value will be null if the fine-tuning job is still running. + training_file: + type: string + description: The file ID used for training. You can retrieve the training data with the [Files API](/docs/api-reference/files/retrieve-contents). + validation_file: + type: string + nullable: true + description: The file ID used for validation. You can retrieve the validation results with the [Files API](/docs/api-reference/files/retrieve-contents). + integrations: + type: array + nullable: true + description: A list of integrations to enable for this fine-tuning job. + maxItems: 5 + items: + oneOf: + - $ref: "#/components/schemas/FineTuningIntegration" + x-oaiExpandable: true + seed: + type: integer + description: The seed used for the fine-tuning job. 
+ estimated_finish: + type: integer + nullable: true + description: The Unix timestamp (in seconds) for when the fine-tuning job is estimated to finish. The value will be null if the fine-tuning job is not running. + required: + - created_at + - error + - finished_at + - fine_tuned_model + - hyperparameters + - id + - model + - object + - organization_id + - result_files + - status + - trained_tokens + - training_file + - validation_file + - seed + x-oaiMeta: + name: The fine-tuning job object + example: *fine_tuning_example + + FineTuningIntegration: + type: object + title: Fine-Tuning Job Integration + required: + - type + - wandb + properties: + type: + type: string + description: "The type of the integration being enabled for the fine-tuning job" + enum: ["wandb"] + wandb: + type: object + description: | + The settings for your integration with Weights and Biases. This payload specifies the project that + metrics will be sent to. Optionally, you can set an explicit display name for your run, add tags + to your run, and set a default entity (team, username, etc) to be associated with your run. + required: + - project + properties: + project: + description: | + The name of the project that the new run will be created under. + type: string + example: "my-wandb-project" + name: + description: | + A display name to set for the run. If not set, we will use the Job ID as the name. + nullable: true + type: string + entity: + description: | + The entity to use for the run. This allows you to set the team or username of the WandB user that you would + like associated with the run. If not set, the default entity for the registered WandB API key is used. + nullable: true + type: string + tags: + description: | + A list of tags to be attached to the newly created run. These tags are passed through directly to WandB. Some + default tags are generated by OpenAI: "openai/finetune", "openai/{base-model}", "openai/{ftjob-abcdef}". + type: array + items: + type: string + example: "custom-tag" + + FineTuningJobEvent: + type: object + description: Fine-tuning job event object + properties: + id: + type: string + created_at: + type: integer + level: + type: string + enum: ["info", "warn", "error"] + message: + type: string + object: + type: string + enum: [fine_tuning.job.event] + required: + - id + - object + - created_at + - level + - message + x-oaiMeta: + name: The fine-tuning job event object + example: | + { + "object": "fine_tuning.job.event", + "id": "ftevent-abc123" + "created_at": 1677610602, + "level": "info", + "message": "Created fine-tuning job" + } + + FineTuningJobCheckpoint: + type: object + title: FineTuningJobCheckpoint + description: | + The `fine_tuning.job.checkpoint` object represents a model checkpoint for a fine-tuning job that is ready to use. + properties: + id: + type: string + description: The checkpoint identifier, which can be referenced in the API endpoints. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the checkpoint was created. + fine_tuned_model_checkpoint: + type: string + description: The name of the fine-tuned checkpoint model that is created. + step_number: + type: integer + description: The step number that the checkpoint was created at. + metrics: + type: object + description: Metrics at the step number during the fine-tuning job. 
+ properties: + step: + type: number + train_loss: + type: number + train_mean_token_accuracy: + type: number + valid_loss: + type: number + valid_mean_token_accuracy: + type: number + full_valid_loss: + type: number + full_valid_mean_token_accuracy: + type: number + fine_tuning_job_id: + type: string + description: The name of the fine-tuning job that this checkpoint was created from. + object: + type: string + description: The object type, which is always "fine_tuning.job.checkpoint". + enum: [fine_tuning.job.checkpoint] + required: + - created_at + - fine_tuning_job_id + - fine_tuned_model_checkpoint + - id + - metrics + - object + - step_number + x-oaiMeta: + name: The fine-tuning job checkpoint object + example: | + { + "object": "fine_tuning.job.checkpoint", + "id": "ftckpt_qtZ5Gyk4BLq1SfLFWp3RtO3P", + "created_at": 1712211699, + "fine_tuned_model_checkpoint": "ft:gpt-3.5-turbo-0125:my-org:custom_suffix:9ABel2dg:ckpt-step-88", + "fine_tuning_job_id": "ftjob-fpbNQ3H1GrMehXRf8cO97xTN", + "metrics": { + "step": 88, + "train_loss": 0.478, + "train_mean_token_accuracy": 0.924, + "valid_loss": 10.112, + "valid_mean_token_accuracy": 0.145, + "full_valid_loss": 0.567, + "full_valid_mean_token_accuracy": 0.944 + }, + "step_number": 88 + } + + CompletionUsage: + type: object + description: Usage statistics for the completion request. + properties: + completion_tokens: + type: integer + description: Number of tokens in the generated completion. + prompt_tokens: + type: integer + description: Number of tokens in the prompt. + total_tokens: + type: integer + description: Total number of tokens used in the request (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + + RunCompletionUsage: + type: object + description: Usage statistics related to the run. This value will be `null` if the run is not in a terminal state (i.e. `in_progress`, `queued`, etc.). + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + RunStepCompletionUsage: + type: object + description: Usage statistics related to the run step. This value will be `null` while the run step's status is `in_progress`. + properties: + completion_tokens: + type: integer + description: Number of completion tokens used over the course of the run step. + prompt_tokens: + type: integer + description: Number of prompt tokens used over the course of the run step. + total_tokens: + type: integer + description: Total number of tokens used (prompt + completion). + required: + - prompt_tokens + - completion_tokens + - total_tokens + nullable: true + + AssistantsApiResponseFormatOption: + description: | + Specifies the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models since `gpt-3.5-turbo-1106`. + + Setting to `{ "type": "json_object" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. 
Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly "stuck" request. Also note that the message content may be partially cut off if `finish_reason="length"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + oneOf: + - type: string + description: > + `auto` is the default value + enum: [none, auto] + - $ref: "#/components/schemas/AssistantsApiResponseFormat" + x-oaiExpandable: true + + AssistantsApiResponseFormat: + type: object + description: | + An object describing the expected output of the model. If `json_object`, only `function` type `tools` are allowed to be passed to the Run. If `text`, the model can return text or any value needed. + properties: + type: + type: string + enum: ["text", "json_object"] + example: "json_object" + default: "text" + description: Must be one of `text` or `json_object`. + + AssistantObject: + type: object + title: Assistant + description: Represents an `assistant` that can call the model and use tools. + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `assistant`. + type: string + enum: [assistant] + created_at: + description: The Unix timestamp (in seconds) for when the assistant was created. + type: integer + name: + description: &assistant_name_param_description | + The name of the assistant. The maximum length is 256 characters. + type: string + maxLength: 256 + nullable: true + description: + description: &assistant_description_param_description | + The description of the assistant. The maximum length is 512 characters. + type: string + maxLength: 512 + nullable: true + model: + description: *model_description + type: string + instructions: + description: &assistant_instructions_param_description | + The system instructions that the assistant uses. The maximum length is 256,000 characters. + type: string + maxLength: 256000 + nullable: true + tools: + description: &assistant_tools_param_description | + A list of tools enabled on the assistant. There can be a maximum of 128 tools per assistant. Tools can be of types `code_interpreter`, `file_search`, or `function`. + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant.
+ maxItems: 1 + items: + type: string + nullable: true + metadata: + description: &metadata_description | + Set of 16 key-value pairs that can be attached to an object. This can be useful for storing additional information about the object in a structured format. Keys can be a maximum of 64 characters long and values can be a maximum of 512 characters long. + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - name + - description + - model + - instructions + - tools + - metadata + x-oaiMeta: + name: The assistant object + beta: true + example: *create_assistants_example + + CreateAssistantRequest: + type: object + additionalProperties: false + properties: + model: + description: *model_description + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + name: + description: *assistant_name_param_description + type: string + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description + type: string + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description + type: string + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool.
+ default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [vector_store_ids] + - required: [vector_stores] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: &run_temperature_description | + What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - model + + ModifyAssistantRequest: + type: object + additionalProperties: false + properties: + model: + description: *model_description + anyOf: + - type: string + name: + description: *assistant_name_param_description + type: string + nullable: true + maxLength: 256 + description: + description: *assistant_description_param_description + type: string + nullable: true + maxLength: 512 + instructions: + description: *assistant_instructions_param_description + type: string + nullable: true + maxLength: 256000 + tools: + description: *assistant_tools_param_description + default: [] + type: array + maxItems: 128 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. 
+ properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + Overrides the list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + Overrides the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + description: *run_temperature_description + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + + DeleteAssistantResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [assistant.deleted] + required: + - id + - object + - deleted + + ListAssistantsResponse: + type: object + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/AssistantObject" + first_id: + type: string + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + x-oaiMeta: + name: List assistants response object + group: chat + example: *list_assistants_example + + AssistantToolsCode: + type: object + title: Code interpreter tool + properties: + type: + type: string + description: "The type of tool being defined: `code_interpreter`" + enum: ["code_interpreter"] + required: + - type + + AssistantToolsFileSearch: + type: object + title: FileSearch tool + properties: + type: + type: string + description: "The type of tool being defined: `file_search`" + enum: ["file_search"] + required: + - type + + AssistantToolsFunction: + type: object + title: Function tool + properties: + type: + type: string + description: "The type of tool being defined: `function`" + enum: ["function"] + function: + $ref: "#/components/schemas/FunctionObject" + required: + - type + - function + + TruncationObject: + type: object + title: Thread Truncation Controls + description: Controls for how a thread will be truncated prior to the run. Use this to control the initial context window of the run. + properties: + type: + type: string + description: The truncation strategy to use for the thread. The default is `auto`. If set to `last_messages`, the thread will be truncated to the n most recent messages in the thread. When set to `auto`, messages in the middle of the thread will be dropped to fit the context length of the model, `max_prompt_tokens`.
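+ # Editor's note (illustrative only): a truncation_strategy value is expected to look like
+ # {"type": "auto"} or {"type": "last_messages", "last_messages": 5}.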
+ enum: ["auto", "last_messages"] + last_messages: + type: integer + description: The number of most recent messages from the thread when constructing the context for the run. + minimum: 1 + nullable: true + required: + - type + + AssistantsApiToolChoiceOption: + description: | + Controls which (if any) tool is called by the model. + `none` means the model will not call any tools and instead generates a message. + `auto` is the default value and means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + Specifying a particular tool like `{"type": "file_search"}` or `{"type": "function", "function": {"name": "my_function"}}` forces the model to call that tool. + + oneOf: + - type: string + description: > + `none` means the model will not call any tools and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools before responding to the user. + enum: [none, auto, required] + - $ref: "#/components/schemas/AssistantsNamedToolChoice" + x-oaiExpandable: true + + AssistantsNamedToolChoice: + type: object + description: Specifies a tool the model should use. Use to force the model to call a specific tool. + properties: + type: + type: string + enum: ["function", "code_interpreter", "file_search"] + description: The type of the tool. If type is `function`, the function name must be set + function: + type: object + properties: + name: + type: string + description: The name of the function to call. + required: + - name + required: + - type + + RunObject: + type: object + title: A run on a thread + description: Represents an execution run on a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run`. + type: string + enum: ["thread.run"] + created_at: + description: The Unix timestamp (in seconds) for when the run was created. + type: integer + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was executed on as a part of this run. + type: string + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) used for execution of this run. + type: string + status: + description: The status of the run, which can be either `queued`, `in_progress`, `requires_action`, `cancelling`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: + [ + "queued", + "in_progress", + "requires_action", + "cancelling", + "cancelled", + "failed", + "completed", + "expired", + ] + required_action: + type: object + description: Details on the action required to continue the run. Will be `null` if no action is required. + nullable: true + properties: + type: + description: For now, this is always `submit_tool_outputs`. + type: string + enum: ["submit_tool_outputs"] + submit_tool_outputs: + type: object + description: Details on the tool outputs needed for this run to continue. + properties: + tool_calls: + type: array + description: A list of the relevant tool calls. + items: + $ref: "#/components/schemas/RunToolCallObject" + required: + - tool_calls + required: + - type + - submit_tool_outputs + last_error: + type: object + description: The last error associated with this run. Will be `null` if there are no errors. 
+ nullable: true + properties: + code: + type: string + description: One of `server_error`, `rate_limit_exceeded`, or `invalid_prompt`. + enum: ["server_error", "rate_limit_exceeded", "invalid_prompt"] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expires_at: + description: The Unix timestamp (in seconds) for when the run will expire. + type: integer + nullable: true + started_at: + description: The Unix timestamp (in seconds) for when the run was started. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run was completed. + type: integer + nullable: true + incomplete_details: + description: Details on why the run is incomplete. Will be `null` if the run is not incomplete. + type: object + nullable: true + properties: + reason: + description: The reason why the run is incomplete. This will point to which specific token limit was reached over the course of the run. + type: string + enum: ["max_completion_tokens", "max_prompt_tokens"] + model: + description: The model that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + instructions: + description: The instructions that the [assistant](/docs/api-reference/assistants) used for this run. + type: string + tools: + description: The list of tools that the [assistant](/docs/api-reference/assistants) used for this run. + default: [] + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunCompletionUsage" + temperature: + description: The sampling temperature used for this run. If not set, defaults to 1. + type: number + nullable: true + top_p: + description: The nucleus sampling value used for this run. If not set, defaults to 1. + type: number + nullable: true + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens specified to have been used over the course of the run. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens specified to have been used over the course of the run. 
+ minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - id + - object + - created_at + - thread_id + - assistant_id + - status + - required_action + - last_error + - expires_at + - started_at + - cancelled_at + - failed_at + - completed_at + - model + - instructions + - tools + - metadata + - usage + - incomplete_details + - max_prompt_tokens + - max_completion_tokens + - truncation_strategy + - tool_choice + - response_format + x-oaiMeta: + name: The run object + beta: true + example: | + { + "id": "run_abc123", + "object": "thread.run", + "created_at": 1698107661, + "assistant_id": "asst_abc123", + "thread_id": "thread_abc123", + "status": "completed", + "started_at": 1699073476, + "expires_at": null, + "cancelled_at": null, + "failed_at": null, + "completed_at": 1699073498, + "last_error": null, + "model": "gpt-4-turbo", + "instructions": null, + "tools": [{"type": "file_search"}, {"type": "code_interpreter"}], + "metadata": {}, + "incomplete_details": null, + "usage": { + "prompt_tokens": 123, + "completion_tokens": 456, + "total_tokens": 579 + }, + "temperature": 1.0, + "top_p": 1.0, + "max_prompt_tokens": 1000, + "max_completion_tokens": 1000, + "truncation_strategy": { + "type": "auto", + "last_messages": null + }, + "response_format": "auto", + "tool_choice": "auto" + } + CreateRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Overrides the [instructions](/docs/api-reference/assistants/createAssistant) of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + additional_instructions: + description: Appends additional instructions at the end of the instructions for the run. This is useful for modifying the behavior on a per-run basis without overriding other instructions. + type: string + nullable: true + additional_messages: + description: Adds additional messages to the thread before creating the run. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. 
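+ # Editor's note (illustrative only): a per-run override such as "tools": [{"type": "file_search"}]
+ # replaces the assistant's configured tool list for this run only.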
+ nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + x-oaiExpandable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: &run_top_p_description | + An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or temperature but not both. + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - thread_id + - assistant_id + ListRunsResponse: + type: object + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunObject" + first_id: + type: string + example: "run_abc123" + last_id: + type: string + example: "run_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + ModifyRunRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + SubmitToolOutputsRunRequest: + type: object + additionalProperties: false + properties: + tool_outputs: + description: A list of tools for which the outputs are being submitted. + type: array + items: + type: object + properties: + tool_call_id: + type: string + description: The ID of the tool call in the `required_action` object within the run object the output is being submitted for. + output: + type: string + description: The output of the tool call to be submitted to continue the run. 
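+ # Editor's note (illustrative only): a minimal submit-tool-outputs request body might look like
+ # {"tool_outputs": [{"tool_call_id": "call_abc123", "output": "70 degrees and sunny."}]};
+ # the optional `stream` flag is defined next.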
+ stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + required: + - tool_outputs + + RunToolCallObject: + type: object + description: Tool call objects + properties: + id: + type: string + description: The ID of the tool call. This ID must be referenced when you submit the tool outputs in using the [Submit tool outputs to run](/docs/api-reference/runs/submitToolOutputs) endpoint. + type: + type: string + description: The type of tool call the output is required for. For now, this is always `function`. + enum: ["function"] + function: + type: object + description: The function definition. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments that the model expects you to pass to the function. + required: + - name + - arguments + required: + - id + - type + - function + + CreateThreadAndRunRequest: + type: object + additionalProperties: false + properties: + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) to use to execute this run. + type: string + thread: + $ref: "#/components/schemas/CreateThreadRequest" + description: If no thread is provided, an empty thread will be created. + model: + description: The ID of the [Model](/docs/api-reference/models) to be used to execute this run. If a value is provided here, it will override the model associated with the assistant. If not, the model associated with the assistant will be used. + example: "gpt-4-turbo" + anyOf: + - type: string + - type: string + enum: + [ + "gpt-4-turbo", + "gpt-4-turbo-2024-04-09", + "gpt-4-0125-preview", + "gpt-4-turbo-preview", + "gpt-4-1106-preview", + "gpt-4-vision-preview", + "gpt-4", + "gpt-4-0314", + "gpt-4-0613", + "gpt-4-32k", + "gpt-4-32k-0314", + "gpt-4-32k-0613", + "gpt-3.5-turbo", + "gpt-3.5-turbo-16k", + "gpt-3.5-turbo-0613", + "gpt-3.5-turbo-1106", + "gpt-3.5-turbo-0125", + "gpt-3.5-turbo-16k-0613", + ] + x-oaiTypeLabel: string + nullable: true + instructions: + description: Override the default system message of the assistant. This is useful for modifying the behavior on a per-run basis. + type: string + nullable: true + tools: + description: Override the tools the assistant can use for this run. This is useful for modifying the behavior on a per-run basis. + nullable: true + type: array + maxItems: 20 + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + - $ref: "#/components/schemas/AssistantToolsFunction" + tool_resources: + type: object + description: | + A set of resources that are used by the assistant's tools. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The ID of the [vector store](/docs/api-reference/vector-stores/object) attached to this assistant. 
There can be a maximum of 1 vector store attached to the assistant. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + temperature: + type: number + minimum: 0 + maximum: 2 + default: 1 + example: 1 + nullable: true + description: *run_temperature_description + top_p: + type: number + minimum: 0 + maximum: 1 + default: 1 + example: 1 + nullable: true + description: *run_top_p_description + stream: + type: boolean + nullable: true + description: | + If `true`, returns a stream of events that happen during the Run as server-sent events, terminating when the Run enters a terminal state with a `data: [DONE]` message. + max_prompt_tokens: + type: integer + nullable: true + description: | + The maximum number of prompt tokens that may be used over the course of the run. The run will make a best effort to use only the number of prompt tokens specified, across multiple turns of the run. If the run exceeds the number of prompt tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + max_completion_tokens: + type: integer + nullable: true + description: | + The maximum number of completion tokens that may be used over the course of the run. The run will make a best effort to use only the number of completion tokens specified, across multiple turns of the run. If the run exceeds the number of completion tokens specified, the run will end with status `incomplete`. See `incomplete_details` for more info. + minimum: 256 + truncation_strategy: + $ref: "#/components/schemas/TruncationObject" + nullable: true + tool_choice: + $ref: "#/components/schemas/AssistantsApiToolChoiceOption" + nullable: true + response_format: + $ref: "#/components/schemas/AssistantsApiResponseFormatOption" + nullable: true + required: + - thread_id + - assistant_id + + ThreadObject: + type: object + title: Thread + description: Represents a thread that contains [messages](/docs/api-reference/messages). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread`. + type: string + enum: ["thread"] + created_at: + description: The Unix timestamp (in seconds) for when the thread was created. + type: integer + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. 
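+ # Editor's note (illustrative only): a populated thread tool_resources object might resemble
+ # {"code_interpreter": {"file_ids": ["file-abc123"]}, "file_search": {"vector_store_ids": ["vs_abc123"]}}.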
+ maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - created_at + - tool_resources + - metadata + x-oaiMeta: + name: The thread object + beta: true + example: | + { + "id": "thread_abc123", + "object": "thread", + "created_at": 1698107661, + "metadata": {} + } + + CreateThreadRequest: + type: object + additionalProperties: false + properties: + messages: + description: A list of [messages](/docs/api-reference/messages) to start the thread with. + type: array + items: + $ref: "#/components/schemas/CreateMessageRequest" + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string + vector_stores: + type: array + description: | + A helper to create a [vector store](/docs/api-reference/vector-stores/object) with file_ids and attach it to this thread. There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs to add to the vector store. There can be a maximum of 10000 files in a vector store. + maxItems: 10000 + items: + type: string + metadata: + type: object + description: | + Set of 16 key-value pairs that can be attached to a vector store. This can be useful for storing additional information about the vector store in a structured format. Keys can be a maximum of 64 characters long and values can be a maxium of 512 characters long. + x-oaiTypeLabel: map + oneOf: + - required: [vector_store_ids] + - required: [vector_stores] + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + ModifyThreadRequest: + type: object + additionalProperties: false + properties: + tool_resources: + type: object + description: | + A set of resources that are made available to the assistant's tools in this thread. The resources are specific to the type of tool. For example, the `code_interpreter` tool requires a list of file IDs, while the `file_search` tool requires a list of vector store IDs. + properties: + code_interpreter: + type: object + properties: + file_ids: + type: array + description: | + A list of [file](/docs/api-reference/files) IDs made available to the `code_interpreter` tool. There can be a maximum of 20 files associated with the tool. + default: [] + maxItems: 20 + items: + type: string + file_search: + type: object + properties: + vector_store_ids: + type: array + description: | + The [vector store](/docs/api-reference/vector-stores/object) attached to this thread. 
There can be a maximum of 1 vector store attached to the thread. + maxItems: 1 + items: + type: string + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + DeleteThreadResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [thread.deleted] + required: + - id + - object + - deleted + + ListThreadsResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/ThreadObject" + first_id: + type: string + example: "asst_abc123" + last_id: + type: string + example: "asst_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + MessageObject: + type: object + title: The message object + description: Represents a message within a [thread](/docs/api-reference/threads). + properties: + id: + description: The identifier, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message`. + type: string + enum: ["thread.message"] + created_at: + description: The Unix timestamp (in seconds) for when the message was created. + type: integer + thread_id: + description: The [thread](/docs/api-reference/threads) ID that this message belongs to. + type: string + status: + description: The status of the message, which can be either `in_progress`, `incomplete`, or `completed`. + type: string + enum: ["in_progress", "incomplete", "completed"] + incomplete_details: + description: On an incomplete message, details about why the message is incomplete. + type: object + properties: + reason: + type: string + description: The reason the message is incomplete. + enum: + [ + "content_filter", + "max_tokens", + "run_cancelled", + "run_expired", + "run_failed", + ] + nullable: true + required: + - reason + completed_at: + description: The Unix timestamp (in seconds) for when the message was completed. + type: integer + nullable: true + incomplete_at: + description: The Unix timestamp (in seconds) for when the message was marked as incomplete. + type: integer + nullable: true + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentImageFileObject" + - $ref: "#/components/schemas/MessageContentTextObject" + x-oaiExpandable: true + assistant_id: + description: If applicable, the ID of the [assistant](/docs/api-reference/assistants) that authored this message. + type: string + nullable: true + run_id: + description: The ID of the [run](/docs/api-reference/runs) associated with the creation of this message. Value is `null` when messages are created manually using the create message or create thread endpoints. + type: string + nullable: true + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they were added to. 
+ nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - created_at + - thread_id + - status + - incomplete_details + - completed_at + - incomplete_at + - role + - content + - assistant_id + - run_id + - attachments + - metadata + x-oaiMeta: + name: The message object + beta: true + example: | + { + "id": "msg_abc123", + "object": "thread.message", + "created_at": 1698983503, + "thread_id": "thread_abc123", + "role": "assistant", + "content": [ + { + "type": "text", + "text": { + "value": "Hi! How can I help you today?", + "annotations": [] + } + } + ], + "assistant_id": "asst_abc123", + "run_id": "run_abc123", + "attachments": [], + "metadata": {} + } + + MessageDeltaObject: + type: object + title: Message delta object + description: | + Represents a message delta i.e. any changed fields on a message during streaming. + properties: + id: + description: The identifier of the message, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.message.delta`. + type: string + enum: ["thread.message.delta"] + delta: + description: The delta containing the fields that have changed on the Message. + type: object + properties: + role: + description: The entity that produced the message. One of `user` or `assistant`. + type: string + enum: ["user", "assistant"] + content: + description: The content of the message in array of text and/or images. + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentImageFileObject" + - $ref: "#/components/schemas/MessageDeltaContentTextObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The message delta object + beta: true + example: | + { + "id": "msg_123", + "object": "thread.message.delta", + "delta": { + "content": [ + { + "index": 0, + "type": "text", + "text": { "value": "Hello", "annotations": [] } + } + ] + } + } + + CreateMessageRequest: + type: object + additionalProperties: false + required: + - role + - content + properties: + role: + type: string + enum: ["user", "assistant"] + description: | + The role of the entity that is creating the message. Allowed values include: + - `user`: Indicates the message is sent by an actual user and should be used in most cases to represent user-generated messages. + - `assistant`: Indicates the message is generated by the assistant. Use this value to insert messages from the assistant into the conversation. + content: + type: string + minLength: 1 + maxLength: 256000 + description: The content of the message. + attachments: + type: array + items: + type: object + properties: + file_id: + type: string + description: The ID of the file to attach to the message. + tools: + description: The tools to add this file to. + type: array + items: + oneOf: + - $ref: "#/components/schemas/AssistantToolsCode" + - $ref: "#/components/schemas/AssistantToolsFileSearch" + x-oaiExpandable: true + description: A list of files attached to the message, and the tools they should be added to. 
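+ # Editor's note (illustrative only): a typical create-message body might resemble
+ # {"role": "user", "content": "Hello", "attachments": [{"file_id": "file-abc123", "tools": [{"type": "file_search"}]}]}.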
+ required: + - file_id + - tools + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + ModifyMessageRequest: + type: object + additionalProperties: false + properties: + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + + DeleteMessageResponse: + type: object + properties: + id: + type: string + deleted: + type: boolean + object: + type: string + enum: [thread.message.deleted] + required: + - id + - object + - deleted + + ListMessagesResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/MessageObject" + first_id: + type: string + example: "msg_abc123" + last_id: + type: string + example: "msg_abc123" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + MessageContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the content of a message. + properties: + type: + description: Always `image_file`. + type: string + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. + type: string + required: + - file_id + required: + - type + - image_file + + MessageDeltaContentImageFileObject: + title: Image file + type: object + description: References an image [File](/docs/api-reference/files) in the content of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `image_file`. + type: string + enum: ["image_file"] + image_file: + type: object + properties: + file_id: + description: The [File](/docs/api-reference/files) ID of the image in the message content. + type: string + required: + - index + - type + + MessageContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + type: + description: Always `text`. + type: string + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - value + - annotations + required: + - type + - text + + MessageContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + type: + description: Always `file_citation`. + type: string + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. 
+ type: string + required: + - file_id + - quote + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_citation + - start_index + - end_index + + MessageContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + type: + description: Always `file_path`. + type: string + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + required: + - file_id + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - type + - text + - file_path + - start_index + - end_index + + MessageDeltaContentTextObject: + title: Text + type: object + description: The text content that is part of a message. + properties: + index: + type: integer + description: The index of the content part in the message. + type: + description: Always `text`. + type: string + enum: ["text"] + text: + type: object + properties: + value: + description: The data that makes up the text. + type: string + annotations: + type: array + items: + oneOf: + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFileCitationObject" + - $ref: "#/components/schemas/MessageDeltaContentTextAnnotationsFilePathObject" + x-oaiExpandable: true + required: + - index + - type + + MessageDeltaContentTextAnnotationsFileCitationObject: + title: File citation + type: object + description: A citation within the message that points to a specific quote from a specific File associated with the assistant or the message. Generated when the assistant uses the "file_search" tool to search files. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_citation`. + type: string + enum: ["file_citation"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_citation: + type: object + properties: + file_id: + description: The ID of the specific File the citation is from. + type: string + quote: + description: The specific quote in the file. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + MessageDeltaContentTextAnnotationsFilePathObject: + title: File path + type: object + description: A URL for the file that's generated when the assistant used the `code_interpreter` tool to generate a file. + properties: + index: + type: integer + description: The index of the annotation in the text content part. + type: + description: Always `file_path`. + type: string + enum: ["file_path"] + text: + description: The text in the message content that needs to be replaced. + type: string + file_path: + type: object + properties: + file_id: + description: The ID of the file that was generated. + type: string + start_index: + type: integer + minimum: 0 + end_index: + type: integer + minimum: 0 + required: + - index + - type + + RunStepObject: + type: object + title: Run steps + description: | + Represents a step in execution of a run. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. 
+ type: string + object: + description: The object type, which is always `thread.run.step`. + type: string + enum: ["thread.run.step"] + created_at: + description: The Unix timestamp (in seconds) for when the run step was created. + type: integer + assistant_id: + description: The ID of the [assistant](/docs/api-reference/assistants) associated with the run step. + type: string + thread_id: + description: The ID of the [thread](/docs/api-reference/threads) that was run. + type: string + run_id: + description: The ID of the [run](/docs/api-reference/runs) that this run step is a part of. + type: string + type: + description: The type of run step, which can be either `message_creation` or `tool_calls`. + type: string + enum: ["message_creation", "tool_calls"] + status: + description: The status of the run step, which can be either `in_progress`, `cancelled`, `failed`, `completed`, or `expired`. + type: string + enum: ["in_progress", "cancelled", "failed", "completed", "expired"] + step_details: + type: object + description: The details of the run step. + oneOf: + - $ref: "#/components/schemas/RunStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsObject" + x-oaiExpandable: true + last_error: + type: object + description: The last error associated with this run step. Will be `null` if there are no errors. + nullable: true + properties: + code: + type: string + description: One of `server_error` or `rate_limit_exceeded`. + enum: ["server_error", "rate_limit_exceeded"] + message: + type: string + description: A human-readable description of the error. + required: + - code + - message + expired_at: + description: The Unix timestamp (in seconds) for when the run step expired. A step is considered expired if the parent run is expired. + type: integer + nullable: true + cancelled_at: + description: The Unix timestamp (in seconds) for when the run step was cancelled. + type: integer + nullable: true + failed_at: + description: The Unix timestamp (in seconds) for when the run step failed. + type: integer + nullable: true + completed_at: + description: The Unix timestamp (in seconds) for when the run step completed. + type: integer + nullable: true + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + usage: + $ref: "#/components/schemas/RunStepCompletionUsage" + required: + - id + - object + - created_at + - assistant_id + - thread_id + - run_id + - type + - status + - step_details + - last_error + - expired_at + - cancelled_at + - failed_at + - completed_at + - metadata + - usage + x-oaiMeta: + name: The run step object + beta: true + example: *run_step_object_example + + RunStepDeltaObject: + type: object + title: Run step delta object + description: | + Represents a run step delta i.e. any changed fields on a run step during streaming. + properties: + id: + description: The identifier of the run step, which can be referenced in API endpoints. + type: string + object: + description: The object type, which is always `thread.run.step.delta`. + type: string + enum: ["thread.run.step.delta"] + delta: + description: The delta containing the fields that have changed on the run step. + type: object + properties: + step_details: + type: object + description: The details of the run step. 
+ oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsMessageCreationObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsObject" + x-oaiExpandable: true + required: + - id + - object + - delta + x-oaiMeta: + name: The run step delta object + beta: true + example: | + { + "id": "step_123", + "object": "thread.run.step.delta", + "delta": { + "step_details": { + "type": "tool_calls", + "tool_calls": [ + { + "index": 0, + "id": "call_123", + "type": "code_interpreter", + "code_interpreter": { "input": "", "outputs": [] } + } + ] + } + } + } + + ListRunStepsResponse: + properties: + object: + type: string + example: "list" + data: + type: array + items: + $ref: "#/components/schemas/RunStepObject" + first_id: + type: string + example: "step_abc123" + last_id: + type: string + example: "step_abc456" + has_more: + type: boolean + example: false + required: + - object + - data + - first_id + - last_id + - has_more + + RunStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - message_id + required: + - type + - message_creation + + RunStepDeltaStepDetailsMessageCreationObject: + title: Message creation + type: object + description: Details of the message creation by the run step. + properties: + type: + description: Always `message_creation`. + type: string + enum: ["message_creation"] + message_creation: + type: object + properties: + message_id: + type: string + description: The ID of the message that was created by this run step. + required: + - type + + RunStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + - tool_calls + + RunStepDeltaStepDetailsToolCallsObject: + title: Tool calls + type: object + description: Details of the tool call. + properties: + type: + description: Always `tool_calls`. + type: string + enum: ["tool_calls"] + tool_calls: + type: array + description: | + An array of tool calls the run step was involved in. These can be associated with one of three types of tools: `code_interpreter`, `file_search`, or `function`. + items: + oneOf: + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFileSearchObject" + - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsFunctionObject" + x-oaiExpandable: true + required: + - type + + RunStepDetailsToolCallsCodeObject: + title: Code Interpreter tool call + type: object + description: Details of the Code Interpreter tool call the run step was involved in. + properties: + id: + type: string + description: The ID of the tool call. 
+        type:
+          type: string
+          description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call.
+          enum: ["code_interpreter"]
+        code_interpreter:
+          type: object
+          description: The Code Interpreter tool call definition.
+          required:
+            - input
+            - outputs
+          properties:
+            input:
+              type: string
+              description: The input to the Code Interpreter tool call.
+            outputs:
+              type: array
+              description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these is represented by a different object type.
+              items:
+                type: object
+                oneOf:
+                  - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputLogsObject"
+                  - $ref: "#/components/schemas/RunStepDetailsToolCallsCodeOutputImageObject"
+                x-oaiExpandable: true
+      required:
+        - id
+        - type
+        - code_interpreter
+
+    RunStepDeltaStepDetailsToolCallsCodeObject:
+      title: Code interpreter tool call
+      type: object
+      description: Details of the Code Interpreter tool call the run step was involved in.
+      properties:
+        index:
+          type: integer
+          description: The index of the tool call in the tool calls array.
+        id:
+          type: string
+          description: The ID of the tool call.
+        type:
+          type: string
+          description: The type of tool call. This is always going to be `code_interpreter` for this type of tool call.
+          enum: ["code_interpreter"]
+        code_interpreter:
+          type: object
+          description: The Code Interpreter tool call definition.
+          properties:
+            input:
+              type: string
+              description: The input to the Code Interpreter tool call.
+            outputs:
+              type: array
+              description: The outputs from the Code Interpreter tool call. Code Interpreter can output one or more items, including text (`logs`) or images (`image`). Each of these is represented by a different object type.
+              items:
+                type: object
+                oneOf:
+                  - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject"
+                  - $ref: "#/components/schemas/RunStepDeltaStepDetailsToolCallsCodeOutputImageObject"
+                x-oaiExpandable: true
+      required:
+        - index
+        - type
+
+    RunStepDetailsToolCallsCodeOutputLogsObject:
+      title: Code Interpreter log output
+      type: object
+      description: Text output from the Code Interpreter tool call as part of a run step.
+      properties:
+        type:
+          description: Always `logs`.
+          type: string
+          enum: ["logs"]
+        logs:
+          type: string
+          description: The text output from the Code Interpreter tool call.
+      required:
+        - type
+        - logs
+
+    RunStepDeltaStepDetailsToolCallsCodeOutputLogsObject:
+      title: Code interpreter log output
+      type: object
+      description: Text output from the Code Interpreter tool call as part of a run step.
+      properties:
+        index:
+          type: integer
+          description: The index of the output in the outputs array.
+        type:
+          description: Always `logs`.
+          type: string
+          enum: ["logs"]
+        logs:
+          type: string
+          description: The text output from the Code Interpreter tool call.
+      required:
+        - index
+        - type
+
+    RunStepDetailsToolCallsCodeOutputImageObject:
+      title: Code Interpreter image output
+      type: object
+      properties:
+        type:
+          description: Always `image`.
+          type: string
+          enum: ["image"]
+        image:
+          type: object
+          properties:
+            file_id:
+              description: The [file](/docs/api-reference/files) ID of the image.
+ type: string + required: + - file_id + required: + - type + - image + + RunStepDeltaStepDetailsToolCallsCodeOutputImageObject: + title: Code interpreter image output + type: object + properties: + index: + type: integer + description: The index of the output in the outputs array. + type: + description: Always `image`. + type: string + enum: ["image"] + image: + type: object + properties: + file_id: + description: The [file](/docs/api-reference/files) ID of the image. + type: string + required: + - index + - type + + RunStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: ["file_search"] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - id + - type + - file_search + + RunStepDeltaStepDetailsToolCallsFileSearchObject: + title: File search tool call + type: object + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `file_search` for this type of tool call. + enum: ["file_search"] + file_search: + type: object + description: For now, this is always going to be an empty object. + x-oaiTypeLabel: map + required: + - index + - type + - file_search + + RunStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true + required: + - name + - arguments + - output + required: + - id + - type + - function + + RunStepDeltaStepDetailsToolCallsFunctionObject: + type: object + title: Function tool call + properties: + index: + type: integer + description: The index of the tool call in the tool calls array. + id: + type: string + description: The ID of the tool call object. + type: + type: string + description: The type of tool call. This is always going to be `function` for this type of tool call. + enum: ["function"] + function: + type: object + description: The definition of the function that was called. + properties: + name: + type: string + description: The name of the function. + arguments: + type: string + description: The arguments passed to the function. + output: + type: string + description: The output of the function. This will be `null` if the outputs have not been [submitted](/docs/api-reference/runs/submitToolOutputs) yet. + nullable: true + required: + - index + - type + + VectorStoreExpirationAfter: + type: object + title: Vector store expiration policy + description: The expiration policy for a vector store. 
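+      # Illustrative note (not part of the spec): under this policy a vector store
+      # expires a number of days after it was last active, e.g. in Python:
+      #
+      #   expires_after = {"anchor": "last_active_at", "days": 7}
+      #   # the store then expires 7 days after its `last_active_at` timestamp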
+      properties:
+        anchor:
+          description: "Anchor timestamp after which the expiration policy applies. Supported anchors: `last_active_at`."
+          type: string
+          enum: ["last_active_at"]
+        days:
+          description: The number of days after the anchor time that the vector store will expire.
+          type: integer
+          minimum: 1
+          maximum: 365
+      required:
+        - anchor
+        - days
+
+    VectorStoreObject:
+      type: object
+      title: Vector store
+      description: A vector store is a collection of processed files that can be used by the `file_search` tool.
+      properties:
+        id:
+          description: The identifier, which can be referenced in API endpoints.
+          type: string
+        object:
+          description: The object type, which is always `vector_store`.
+          type: string
+          enum: ["vector_store"]
+        created_at:
+          description: The Unix timestamp (in seconds) for when the vector store was created.
+          type: integer
+        name:
+          description: The name of the vector store.
+          type: string
+        usage_bytes:
+          description: The total number of bytes used by the files in the vector store.
+          type: integer
+        file_counts:
+          type: object
+          properties:
+            in_progress:
+              description: The number of files that are currently being processed.
+              type: integer
+            completed:
+              description: The number of files that have been successfully processed.
+              type: integer
+            failed:
+              description: The number of files that have failed to process.
+              type: integer
+            cancelled:
+              description: The number of files that were cancelled.
+              type: integer
+            total:
+              description: The total number of files.
+              type: integer
+          required:
+            - in_progress
+            - completed
+            - failed
+            - cancelled
+            - total
+        status:
+          description: The status of the vector store, which can be either `expired`, `in_progress`, or `completed`. A status of `completed` indicates that the vector store is ready for use.
+          type: string
+          enum: ["expired", "in_progress", "completed"]
+        expires_after:
+          $ref: "#/components/schemas/VectorStoreExpirationAfter"
+        expires_at:
+          description: The Unix timestamp (in seconds) for when the vector store will expire.
+          type: integer
+          nullable: true
+        last_active_at:
+          description: The Unix timestamp (in seconds) for when the vector store was last active.
+          type: integer
+          nullable: true
+        metadata:
+          description: *metadata_description
+          type: object
+          x-oaiTypeLabel: map
+          nullable: true
+      required:
+        - id
+        - object
+        - usage_bytes
+        - created_at
+        - status
+        - last_active_at
+        - name
+        - file_counts
+        - metadata
+      x-oaiMeta:
+        name: The vector store object
+        beta: true
+        example: |
+          {
+            "id": "vs_123",
+            "object": "vector_store",
+            "created_at": 1698107661,
+            "usage_bytes": 123456,
+            "last_active_at": 1698107661,
+            "name": "my_vector_store",
+            "status": "completed",
+            "file_counts": {
+              "in_progress": 0,
+              "completed": 100,
+              "cancelled": 0,
+              "failed": 0,
+              "total": 100
+            },
+            "metadata": {},
+            "last_used_at": 1698107661
+          }
+
+    CreateVectorStoreRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file_ids:
+          description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.
+          type: array
+          maxItems: 500
+          items:
+            type: string
+        name:
+          description: The name of the vector store.
+          type: string
+        expires_after:
+          $ref: "#/components/schemas/VectorStoreExpirationAfter"
+        metadata:
+          description: *metadata_description
+          type: object
+          x-oaiTypeLabel: map
+          nullable: true
+
+    UpdateVectorStoreRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        name:
+          description: The name of the vector store.
+          type: string
+          nullable: true
+        expires_after:
+          $ref: "#/components/schemas/VectorStoreExpirationAfter"
+          nullable: true
+        metadata:
+          description: *metadata_description
+          type: object
+          x-oaiTypeLabel: map
+          nullable: true
+
+    ListVectorStoresResponse:
+      properties:
+        object:
+          type: string
+          example: "list"
+        data:
+          type: array
+          items:
+            $ref: "#/components/schemas/VectorStoreObject"
+        first_id:
+          type: string
+          example: "vs_abc123"
+        last_id:
+          type: string
+          example: "vs_abc456"
+        has_more:
+          type: boolean
+          example: false
+      required:
+        - object
+        - data
+        - first_id
+        - last_id
+        - has_more
+
+    DeleteVectorStoreResponse:
+      type: object
+      properties:
+        id:
+          type: string
+        deleted:
+          type: boolean
+        object:
+          type: string
+          enum: [vector_store.deleted]
+      required:
+        - id
+        - object
+        - deleted
+
+    VectorStoreFileObject:
+      type: object
+      title: Vector store files
+      description: A list of files attached to a vector store.
+      properties:
+        id:
+          description: The identifier, which can be referenced in API endpoints.
+          type: string
+        object:
+          description: The object type, which is always `vector_store.file`.
+          type: string
+          enum: ["vector_store.file"]
+        usage_bytes:
+          description: The total vector store usage in bytes. Note that this may be different from the original file size.
+          type: integer
+        created_at:
+          description: The Unix timestamp (in seconds) for when the vector store file was created.
+          type: integer
+        vector_store_id:
+          description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.
+          type: string
+        status:
+          description: The status of the vector store file, which can be either `in_progress`, `completed`, `cancelled`, or `failed`. The status `completed` indicates that the vector store file is ready for use.
+          type: string
+          enum: ["in_progress", "completed", "cancelled", "failed"]
+        last_error:
+          type: object
+          description: The last error associated with this vector store file. Will be `null` if there are no errors.
+          nullable: true
+          properties:
+            code:
+              type: string
+              description: One of `internal_error`, `file_not_found`, `parsing_error`, or `unhandled_mime_type`.
+              enum:
+                [
+                  "internal_error",
+                  "file_not_found",
+                  "parsing_error",
+                  "unhandled_mime_type",
+                ]
+            message:
+              type: string
+              description: A human-readable description of the error.
+          required:
+            - code
+            - message
+      required:
+        - id
+        - object
+        - usage_bytes
+        - created_at
+        - vector_store_id
+        - status
+        - last_error
+      x-oaiMeta:
+        name: The vector store file object
+        beta: true
+        example: |
+          {
+            "id": "file-abc123",
+            "object": "vector_store.file",
+            "usage_bytes": 1234,
+            "created_at": 1698107661,
+            "vector_store_id": "vs_abc123",
+            "status": "completed",
+            "last_error": null
+          }
+
+    CreateVectorStoreFileRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file_id:
+          description: A [File](/docs/api-reference/files) ID that the vector store should use. Useful for tools like `file_search` that can access files.
+          type: string
+      required:
+        - file_id
+
+    ListVectorStoreFilesResponse:
+      properties:
+        object:
+          type: string
+          example: "list"
+        data:
+          type: array
+          items:
+            $ref: "#/components/schemas/VectorStoreFileObject"
+        first_id:
+          type: string
+          example: "file-abc123"
+        last_id:
+          type: string
+          example: "file-abc456"
+        has_more:
+          type: boolean
+          example: false
+      required:
+        - object
+        - data
+        - first_id
+        - last_id
+        - has_more
+
+    DeleteVectorStoreFileResponse:
+      type: object
+      properties:
+        id:
+          type: string
+        deleted:
+          type: boolean
+        object:
+          type: string
+          enum: [vector_store.file.deleted]
+      required:
+        - id
+        - object
+        - deleted
+
+    VectorStoreFileBatchObject:
+      type: object
+      title: Vector store file batch
+      description: A batch of files attached to a vector store.
+      properties:
+        id:
+          description: The identifier, which can be referenced in API endpoints.
+          type: string
+        object:
+          description: The object type, which is always `vector_store.files_batch`.
+          type: string
+          enum: ["vector_store.files_batch"]
+        created_at:
+          description: The Unix timestamp (in seconds) for when the vector store files batch was created.
+          type: integer
+        vector_store_id:
+          description: The ID of the [vector store](/docs/api-reference/vector-stores/object) that the [File](/docs/api-reference/files) is attached to.
+          type: string
+        status:
+          description: The status of the vector store files batch, which can be either `in_progress`, `completed`, `cancelled`, or `failed`.
+          type: string
+          enum: ["in_progress", "completed", "cancelled", "failed"]
+        file_counts:
+          type: object
+          properties:
+            in_progress:
+              description: The number of files that are currently being processed.
+              type: integer
+            completed:
+              description: The number of files that have been processed.
+              type: integer
+            failed:
+              description: The number of files that have failed to process.
+              type: integer
+            cancelled:
+              description: The number of files that were cancelled.
+              type: integer
+            total:
+              description: The total number of files.
+              type: integer
+          required:
+            - in_progress
+            - completed
+            - cancelled
+            - failed
+            - total
+      required:
+        - id
+        - object
+        - created_at
+        - vector_store_id
+        - status
+        - file_counts
+      x-oaiMeta:
+        name: The vector store files batch object
+        beta: true
+        example: |
+          {
+            "id": "vsfb_123",
+            "object": "vector_store.files_batch",
+            "created_at": 1698107661,
+            "vector_store_id": "vs_abc123",
+            "status": "completed",
+            "file_counts": {
+              "in_progress": 0,
+              "completed": 100,
+              "failed": 0,
+              "cancelled": 0,
+              "total": 100
+            }
+          }
+
+    CreateVectorStoreFileBatchRequest:
+      type: object
+      additionalProperties: false
+      properties:
+        file_ids:
+          description: A list of [File](/docs/api-reference/files) IDs that the vector store should use. Useful for tools like `file_search` that can access files.
+          type: array
+          minItems: 1
+          maxItems: 500
+          items:
+            type: string
+      required:
+        - file_ids
+
+    AssistantStreamEvent:
+      description: |
+        Represents an event emitted when streaming a Run.
+
+        Each event in a server-sent events stream has an `event` and `data` property:
+
+        ```
+        event: thread.created
+        data: {"id": "thread_123", "object": "thread", ...}
+        ```
+
+        We emit events whenever a new object is created, transitions to a new state, or is being
+        streamed in parts (deltas). For example, we emit `thread.run.created` when a new run
+        is created, `thread.run.completed` when a run completes, and so on. When an Assistant chooses
+        to create a message during a run, we emit a `thread.message.created` event, a
+        `thread.message.in_progress` event, many `thread.message.delta` events, and finally a
+        `thread.message.completed` event.
+
+        We may add additional events over time, so we recommend handling unknown events gracefully
+        in your code. See the [Assistants API quickstart](/docs/assistants/overview) to learn how to
+        integrate the Assistants API with streaming.
+      oneOf:
+        - $ref: "#/components/schemas/ThreadStreamEvent"
+        - $ref: "#/components/schemas/RunStreamEvent"
+        - $ref: "#/components/schemas/RunStepStreamEvent"
+        - $ref: "#/components/schemas/MessageStreamEvent"
+        - $ref: "#/components/schemas/ErrorEvent"
+        - $ref: "#/components/schemas/DoneEvent"
+      x-oaiMeta:
+        name: Assistant stream events
+        beta: true
+
+    ThreadStreamEvent:
+      oneOf:
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.created"]
+            data:
+              $ref: "#/components/schemas/ThreadObject"
+          required:
+            - event
+            - data
+          description: Occurs when a new [thread](/docs/api-reference/threads/object) is created.
+          x-oaiMeta:
+            dataDescription: "`data` is a [thread](/docs/api-reference/threads/object)"
+
+    RunStreamEvent:
+      oneOf:
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.run.created"]
+            data:
+              $ref: "#/components/schemas/RunObject"
+          required:
+            - event
+            - data
+          description: Occurs when a new [run](/docs/api-reference/runs/object) is created.
+          x-oaiMeta:
+            dataDescription: "`data` is a [run](/docs/api-reference/runs/object)"
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.run.queued"]
+            data:
+              $ref: "#/components/schemas/RunObject"
+          required:
+            - event
+            - data
+          description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `queued` status.
+          x-oaiMeta:
+            dataDescription: "`data` is a [run](/docs/api-reference/runs/object)"
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.run.in_progress"]
+            data:
+              $ref: "#/components/schemas/RunObject"
+          required:
+            - event
+            - data
+          description: Occurs when a [run](/docs/api-reference/runs/object) moves to an `in_progress` status.
+          x-oaiMeta:
+            dataDescription: "`data` is a [run](/docs/api-reference/runs/object)"
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.run.requires_action"]
+            data:
+              $ref: "#/components/schemas/RunObject"
+          required:
+            - event
+            - data
+          description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `requires_action` status.
+          x-oaiMeta:
+            dataDescription: "`data` is a [run](/docs/api-reference/runs/object)"
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.run.completed"]
+            data:
+              $ref: "#/components/schemas/RunObject"
+          required:
+            - event
+            - data
+          description: Occurs when a [run](/docs/api-reference/runs/object) is completed.
+          x-oaiMeta:
+            dataDescription: "`data` is a [run](/docs/api-reference/runs/object)"
+        - type: object
+          properties:
+            event:
+              type: string
+              enum: ["thread.run.failed"]
+            data:
+              $ref: "#/components/schemas/RunObject"
+          required:
+            - event
+            - data
+          description: Occurs when a [run](/docs/api-reference/runs/object) fails.
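+          # Illustrative note (not part of the spec): a minimal sketch of consuming
+          # the `event:`/`data:` pairs described under AssistantStreamEvent above,
+          # assuming `lines` iterates over a decoded SSE response body. Unknown event
+          # types fall through to a no-op, as the description recommends; `handlers`
+          # is a hypothetical dict mapping event names to callbacks:
+          #
+          #   import json
+          #   event = None
+          #   for line in lines:
+          #       if line.startswith("event: "):
+          #           event = line[len("event: "):]
+          #       elif line.startswith("data: ") and event:
+          #           if event == "done":
+          #               break  # data is the literal "[DONE]", not JSON
+          #           payload = json.loads(line[len("data: "):])
+          #           handlers.get(event, lambda d: None)(payload)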
+ x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.cancelling"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) moves to a `cancelling` status. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.cancelled"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) is cancelled. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.expired"] + data: + $ref: "#/components/schemas/RunObject" + required: + - event + - data + description: Occurs when a [run](/docs/api-reference/runs/object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run](/docs/api-reference/runs/object)" + + RunStepStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.run.step.created"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is created. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.in_progress"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.delta"] + data: + $ref: "#/components/schemas/RunStepDeltaObject" + required: + - event + - data + description: Occurs when parts of a [run step](/docs/api-reference/runs/step-object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [run step delta](/docs/api-reference/assistants-streaming/run-step-delta-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.completed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.failed"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) fails. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.cancelled"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) is cancelled. 
+ x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + - type: object + properties: + event: + type: string + enum: ["thread.run.step.expired"] + data: + $ref: "#/components/schemas/RunStepObject" + required: + - event + - data + description: Occurs when a [run step](/docs/api-reference/runs/step-object) expires. + x-oaiMeta: + dataDescription: "`data` is a [run step](/docs/api-reference/runs/step-object)" + + MessageStreamEvent: + oneOf: + - type: object + properties: + event: + type: string + enum: ["thread.message.created"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is created. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.in_progress"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) moves to an `in_progress` state. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.delta"] + data: + $ref: "#/components/schemas/MessageDeltaObject" + required: + - event + - data + description: Occurs when parts of a [Message](/docs/api-reference/messages/object) are being streamed. + x-oaiMeta: + dataDescription: "`data` is a [message delta](/docs/api-reference/assistants-streaming/message-delta-object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.completed"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + - type: object + properties: + event: + type: string + enum: ["thread.message.incomplete"] + data: + $ref: "#/components/schemas/MessageObject" + required: + - event + - data + description: Occurs when a [message](/docs/api-reference/messages/object) ends before it is completed. + x-oaiMeta: + dataDescription: "`data` is a [message](/docs/api-reference/messages/object)" + + ErrorEvent: + type: object + properties: + event: + type: string + enum: ["error"] + data: + $ref: "#/components/schemas/Error" + required: + - event + - data + description: Occurs when an [error](/docs/guides/error-codes/api-errors) occurs. This can happen due to an internal server error or a timeout. + x-oaiMeta: + dataDescription: "`data` is an [error](/docs/guides/error-codes/api-errors)" + + DoneEvent: + type: object + properties: + event: + type: string + enum: ["done"] + data: + type: string + enum: ["[DONE]"] + required: + - event + - data + description: Occurs when a stream ends. + x-oaiMeta: + dataDescription: "`data` is `[DONE]`" + + Batch: + type: object + properties: + id: + type: string + object: + type: string + enum: [batch] + description: The object type, which is always `batch`. + endpoint: + type: string + description: The OpenAI API endpoint used by the batch. + + errors: + type: object + properties: + object: + type: string + description: The object type, which is always `list`. + data: + type: array + items: + type: object + properties: + code: + type: string + description: An error code identifying the error type. 
+ message: + type: string + description: A human-readable message providing more details about the error. + param: + type: string + description: The name of the parameter that caused the error, if applicable. + nullable: true + line: + type: integer + description: The line number of the input file where the error occurred, if applicable. + nullable: true + input_file_id: + type: string + description: The ID of the input file for the batch. + completion_window: + type: string + description: The time frame within which the batch should be processed. + status: + type: string + description: The current status of the batch. + enum: + - validating + - failed + - in_progress + - finalizing + - completed + - expired + - cancelling + - cancelled + output_file_id: + type: string + description: The ID of the file containing the outputs of successfully executed requests. + error_file_id: + type: string + description: The ID of the file containing the outputs of requests with errors. + created_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was created. + in_progress_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started processing. + expires_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch will expire. + finalizing_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started finalizing. + completed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was completed. + failed_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch failed. + expired_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch expired. + cancelling_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch started cancelling. + cancelled_at: + type: integer + description: The Unix timestamp (in seconds) for when the batch was cancelled. + request_counts: + type: object + properties: + total: + type: integer + description: Total number of requests in the batch. + completed: + type: integer + description: Number of requests that have been completed successfully. + failed: + type: integer + description: Number of requests that have failed. + required: + - total + - completed + - failed + description: The request counts for different statuses within the batch. + metadata: + description: *metadata_description + type: object + x-oaiTypeLabel: map + nullable: true + required: + - id + - object + - endpoint + - input_file_id + - completion_window + - status + - created_at + x-oaiMeta: + name: The batch object + example: *batch_object + + BatchRequestInput: + type: object + description: The per-line object of the batch input file + properties: + custom_id: + type: string + description: A developer-provided per-request id that will be used to match outputs to inputs. Must be unique for each request in a batch. + method: + type: string + enum: ["POST"] + description: The HTTP method to be used for the request. Currently only `POST` is supported. + url: + type: string + description: The OpenAI API relative URL to be used for the request. Currently `/v1/chat/completions` and `/v1/embeddings` are supported. 
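+      # Illustrative note (not part of the spec): each line of a batch input file
+      # is one JSON object of this shape. A minimal Python sketch for building such
+      # a file (file name and messages are made up):
+      #
+      #   import json
+      #   with open("batch_input.jsonl", "w") as f:
+      #       for i, msg in enumerate(["What is 2+2?", "What is 3+3?"]):
+      #           f.write(json.dumps({
+      #               "custom_id": f"request-{i}",
+      #               "method": "POST",
+      #               "url": "/v1/chat/completions",
+      #               "body": {"model": "gpt-3.5-turbo",
+      #                        "messages": [{"role": "user", "content": msg}]},
+      #           }) + "\n")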
+      x-oaiMeta:
+        name: The request input object
+        example: |
+          {"custom_id": "request-1", "method": "POST", "url": "/v1/chat/completions", "body": {"model": "gpt-3.5-turbo", "messages": [{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What is 2+2?"}]}}
+
+    BatchRequestOutput:
+      type: object
+      description: The per-line object of the batch output and error files
+      properties:
+        id:
+          type: string
+        custom_id:
+          type: string
+          description: A developer-provided per-request id that will be used to match outputs to inputs.
+        response:
+          type: object
+          nullable: true
+          properties:
+            status_code:
+              type: integer
+              description: The HTTP status code of the response
+            request_id:
+              type: string
+              description: A unique identifier for the OpenAI API request. Please include this request ID when contacting support.
+            body:
+              type: object
+              x-oaiTypeLabel: map
+              description: The JSON body of the response
+        error:
+          type: object
+          nullable: true
+          description: For requests that failed with a non-HTTP error, this will contain more information on the cause of the failure.
+          properties:
+            code:
+              type: string
+              description: A machine-readable error code.
+            message:
+              type: string
+              description: A human-readable error message.
+      x-oaiMeta:
+        name: The request output object
+        example: |
+          {"id": "batch_req_wnaDys", "custom_id": "request-2", "response": {"status_code": 200, "request_id": "req_c187b3", "body": {"id": "chatcmpl-9758Iw", "object": "chat.completion", "created": 1711475054, "model": "gpt-3.5-turbo", "choices": [{"index": 0, "message": {"role": "assistant", "content": "2 + 2 equals 4."}, "finish_reason": "stop"}], "usage": {"prompt_tokens": 24, "completion_tokens": 15, "total_tokens": 39}, "system_fingerprint": null}}, "error": null}
+
+    ListBatchesResponse:
+      type: object
+      properties:
+        data:
+          type: array
+          items:
+            $ref: "#/components/schemas/Batch"
+        first_id:
+          type: string
+          example: "batch_abc123"
+        last_id:
+          type: string
+          example: "batch_abc456"
+        has_more:
+          type: boolean
+        object:
+          type: string
+          enum: [list]
+      required:
+        - object
+        - data
+        - has_more
+
+security:
+  - ApiKeyAuth: []
+
+x-oaiMeta:
+  navigationGroups:
+    - id: endpoints
+      title: Endpoints
+    - id: assistants
+      title: Assistants
+    - id: legacy
+      title: Legacy
+  groups:
+    # > General Notes
+    # The `groups` section is used to generate the API reference pages and navigation, in the same
+    # order listed below. Additionally, each `group` can have a list of `sections`, each of which
+    # will become a navigation subroute and subsection under the group. Each section has:
+    #  - `type`: Currently, either an `endpoint` or `object`, depending on how the section needs to
+    #    be rendered
+    #  - `key`: The reference key that can be used to look up the section definition
+    #  - `path`: The path (url) of the section, which is used to generate the navigation link.
+    #
+    # > The `object` section maps to a schema component and the following fields are read for rendering:
+    #  - `x-oaiMeta.name`: The name of the object, which will become the section title
+    #  - `x-oaiMeta.example`: The example object, which will be used to generate the example sample (always JSON)
+    #  - `description`: The description of the object, which will be used to generate the section description
+    #
+    # > The `endpoint` section maps to an operation path and the following fields are read for rendering:
+    #  - `x-oaiMeta.name`: The name of the endpoint, which will become the section title
+    #  - `x-oaiMeta.examples`: The endpoint examples, which can be an object (meaning a single variation, most
+    #    endpoints) or an array of objects (meaning multiple variations, e.g. the chat completion and
+    #    completion endpoints, with streamed and non-streamed examples).
+    #  - `x-oaiMeta.returns`: Text describing what the endpoint returns.
+    #  - `summary`: The summary of the endpoint, which will be used to generate the section description
+    - id: audio
+      title: Audio
+      description: |
+        Learn how to turn audio into text or text into audio.
+
+        Related guide: [Speech to text](/docs/guides/speech-to-text)
+      navigationGroup: endpoints
+      sections:
+        - type: endpoint
+          key: createSpeech
+          path: createSpeech
+        - type: endpoint
+          key: createTranscription
+          path: createTranscription
+        - type: endpoint
+          key: createTranslation
+          path: createTranslation
+        - type: object
+          key: CreateTranscriptionResponseJson
+          path: json-object
+        - type: object
+          key: CreateTranscriptionResponseVerboseJson
+          path: verbose-json-object
+    - id: chat
+      title: Chat
+      description: |
+        Given a list of messages comprising a conversation, the model will return a response.
+
+        Related guide: [Chat Completions](/docs/guides/text-generation)
+      navigationGroup: endpoints
+      sections:
+        - type: endpoint
+          key: createChatCompletion
+          path: create
+        - type: object
+          key: CreateChatCompletionResponse
+          path: object
+        - type: object
+          key: CreateChatCompletionStreamResponse
+          path: streaming
+    - id: embeddings
+      title: Embeddings
+      description: |
+        Get a vector representation of a given input that can be easily consumed by machine learning models and algorithms.
+
+        Related guide: [Embeddings](/docs/guides/embeddings)
+      navigationGroup: endpoints
+      sections:
+        - type: endpoint
+          key: createEmbedding
+          path: create
+        - type: object
+          key: Embedding
+          path: object
+    - id: fine-tuning
+      title: Fine-tuning
+      description: |
+        Manage fine-tuning jobs to tailor a model to your specific training data.
+
+        Related guide: [Fine-tune models](/docs/guides/fine-tuning)
+      navigationGroup: endpoints
+      sections:
+        - type: endpoint
+          key: createFineTuningJob
+          path: create
+        - type: endpoint
+          key: listPaginatedFineTuningJobs
+          path: list
+        - type: endpoint
+          key: listFineTuningEvents
+          path: list-events
+        - type: endpoint
+          key: listFineTuningJobCheckpoints
+          path: list-checkpoints
+        - type: endpoint
+          key: retrieveFineTuningJob
+          path: retrieve
+        - type: endpoint
+          key: cancelFineTuningJob
+          path: cancel
+        - type: object
+          key: FineTuningJob
+          path: object
+        - type: object
+          key: FineTuningJobEvent
+          path: event-object
+        - type: object
+          key: FineTuningJobCheckpoint
+          path: checkpoint-object
+    - id: batch
+      title: Batch
+      description: |
+        Create large batches of API requests for asynchronous processing. The Batch API returns completions within 24 hours for a 50% discount.
+ + Related guide: [Batch](/docs/guides/batch) + navigationGroup: endpoints + sections: + - type: endpoint + key: createBatch + path: create + - type: endpoint + key: retrieveBatch + path: retrieve + - type: endpoint + key: cancelBatch + path: cancel + - type: endpoint + key: listBatches + path: list + - type: object + key: Batch + path: object + - type: object + key: BatchRequestInput + path: requestInput + - type: object + key: BatchRequestOutput + path: requestOutput + - id: files + title: Files + description: | + Files are used to upload documents that can be used with features like [Assistants](/docs/api-reference/assistants) and [Fine-tuning](/docs/api-reference/fine-tuning). + navigationGroup: endpoints + sections: + - type: endpoint + key: createFile + path: create + - type: endpoint + key: listFiles + path: list + - type: endpoint + key: retrieveFile + path: retrieve + - type: endpoint + key: deleteFile + path: delete + - type: endpoint + key: downloadFile + path: retrieve-contents + - type: object + key: OpenAIFile + path: object + - id: images + title: Images + description: | + Given a prompt and/or an input image, the model will generate a new image. + + Related guide: [Image generation](/docs/guides/images) + navigationGroup: endpoints + sections: + - type: endpoint + key: createImage + path: create + - type: endpoint + key: createImageEdit + path: createEdit + - type: endpoint + key: createImageVariation + path: createVariation + - type: object + key: Image + path: object + - id: models + title: Models + description: | + List and describe the various models available in the API. You can refer to the [Models](/docs/models) documentation to understand what models are available and the differences between them. + navigationGroup: endpoints + sections: + - type: endpoint + key: listModels + path: list + - type: endpoint + key: retrieveModel + path: retrieve + - type: endpoint + key: deleteModel + path: delete + - type: object + key: Model + path: object + - id: moderations + title: Moderations + description: | + Given some input text, outputs if the model classifies it as potentially harmful across several categories. + + Related guide: [Moderations](/docs/guides/moderation) + navigationGroup: endpoints + sections: + - type: endpoint + key: createModeration + path: create + - type: object + key: CreateModerationResponse + path: object + - id: assistants + title: Assistants + beta: true + description: | + Build assistants that can call models and use tools to perform tasks. + + [Get started with the Assistants API](/docs/assistants) + navigationGroup: assistants + sections: + - type: endpoint + key: createAssistant + path: createAssistant + - type: endpoint + key: listAssistants + path: listAssistants + - type: endpoint + key: getAssistant + path: getAssistant + - type: endpoint + key: modifyAssistant + path: modifyAssistant + - type: endpoint + key: deleteAssistant + path: deleteAssistant + - type: object + key: AssistantObject + path: object + - id: threads + title: Threads + beta: true + description: | + Create threads that assistants can interact with. 
+ + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createThread + path: createThread + - type: endpoint + key: getThread + path: getThread + - type: endpoint + key: modifyThread + path: modifyThread + - type: endpoint + key: deleteThread + path: deleteThread + - type: object + key: ThreadObject + path: object + - id: messages + title: Messages + beta: true + description: | + Create messages within threads + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createMessage + path: createMessage + - type: endpoint + key: listMessages + path: listMessages + - type: endpoint + key: getMessage + path: getMessage + - type: endpoint + key: modifyMessage + path: modifyMessage + - type: endpoint + key: deleteMessage + path: deleteMessage + - type: object + key: MessageObject + path: object + - id: runs + title: Runs + beta: true + description: | + Represents an execution run on a thread. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: createRun + path: createRun + - type: endpoint + key: createThreadAndRun + path: createThreadAndRun + - type: endpoint + key: listRuns + path: listRuns + - type: endpoint + key: getRun + path: getRun + - type: endpoint + key: modifyRun + path: modifyRun + - type: endpoint + key: submitToolOuputsToRun + path: submitToolOutputs + - type: endpoint + key: cancelRun + path: cancelRun + - type: object + key: RunObject + path: object + - id: run-steps + title: Run Steps + beta: true + description: | + Represents the steps (model and tool calls) taken during the run. + + Related guide: [Assistants](/docs/assistants/overview) + navigationGroup: assistants + sections: + - type: endpoint + key: listRunSteps + path: listRunSteps + - type: endpoint + key: getRunStep + path: getRunStep + - type: object + key: RunStepObject + path: step-object + - id: vector-stores + title: Vector Stores + beta: true + description: | + Vector stores are used to store files for use by the `file_search` tool. + + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStore + path: create + - type: endpoint + key: listVectorStores + path: list + - type: endpoint + key: getVectorStore + path: retrieve + - type: endpoint + key: modifyVectorStore + path: modify + - type: endpoint + key: deleteVectorStore + path: delete + - type: object + key: VectorStoreObject + path: object + - id: vector-stores-files + title: Vector Store Files + beta: true + description: | + Vector store files represent files inside a vector store. + + Related guide: [File Search](/docs/assistants/tools/file-search) + navigationGroup: assistants + sections: + - type: endpoint + key: createVectorStoreFile + path: createFile + - type: endpoint + key: listVectorStoreFiles + path: listFiles + - type: endpoint + key: getVectorStoreFile + path: getFile + - type: endpoint + key: deleteVectorStoreFile + path: deleteFile + - type: object + key: VectorStoreFileObject + path: file-object + - id: vector-stores-file-batches + title: Vector Store File Batches + beta: true + description: | + Vector store file batches represent operations to add multiple files to a vector store. 
+
+        Related guide: [File Search](/docs/assistants/tools/file-search)
+      navigationGroup: assistants
+      sections:
+        - type: endpoint
+          key: createVectorStoreFileBatch
+          path: createBatch
+        - type: endpoint
+          key: getVectorStoreFileBatch
+          path: getBatch
+        - type: endpoint
+          key: cancelVectorStoreFileBatch
+          path: cancelBatch
+        - type: endpoint
+          key: listFilesInVectorStoreBatch
+          path: listBatchFiles
+        - type: object
+          key: VectorStoreFileBatchObject
+          path: batch-object
+    - id: assistants-streaming
+      title: Streaming
+      beta: true
+      description: |
+        Stream the result of executing a Run or resuming a Run after submitting tool outputs.
+
+        You can stream events from the [Create Thread and Run](/docs/api-reference/runs/createThreadAndRun),
+        [Create Run](/docs/api-reference/runs/createRun), and [Submit Tool Outputs](/docs/api-reference/runs/submitToolOutputs)
+        endpoints by passing `"stream": true`. The response will be a [Server-Sent events](https://html.spec.whatwg.org/multipage/server-sent-events.html#server-sent-events) stream.
+
+        Our Node and Python SDKs provide helpful utilities to make streaming easy. Reference the
+        [Assistants API quickstart](/docs/assistants/overview) to learn more.
+      navigationGroup: assistants
+      sections:
+        - type: object
+          key: MessageDeltaObject
+          path: message-delta-object
+        - type: object
+          key: RunStepDeltaObject
+          path: run-step-delta-object
+        - type: object
+          key: AssistantStreamEvent
+          path: events
+    - id: completions
+      title: Completions
+      legacy: true
+      navigationGroup: legacy
+      description: |
+        Given a prompt, the model will return one or more predicted completions along with the probabilities of alternative tokens at each position. Most developers should use our [Chat Completions API](/docs/guides/text-generation/text-generation-models) to leverage our best and newest models.
+      sections:
+        - type: endpoint
+          key: createCompletion
+          path: create
+        - type: object
+          key: CreateCompletionResponse
+          path: object
diff --git a/openapi.yaml b/openapi.yaml
new file mode 100644
index 0000000..0cd212b
--- /dev/null
+++ b/openapi.yaml
@@ -0,0 +1,475 @@
+openapi: 3.0.3
+info:
+  title: Log10 Feedback API Spec
+  description: Log10 Feedback API Spec
+  version: 1.0.0
+servers:
+  - url: https://log10.io
+tags:
+  - name: Completions
+    description: Completions
+  - name: Feedback
+    description: Feedback
+  - name: FeedbackTasks
+    description: FeedbackTasks
+  - name: Sessions
+    description: Sessions
+x-speakeasy-globals:
+  parameters:
+    - name: X-Log10-Organization
+      in: header
+      required: true
+      schema:
+        type: string
+components:
+  securitySchemes:
+    Log10Token: # arbitrary name for the security scheme
+      type: apiKey
+      in: header # can be "header", "query" or "cookie"
+      name: X-Log10-Token # name of the header, query parameter or cookie
+  schemas:
+    Task:
+      type: object
+      required:
+        - name
+        - instruction
+        - json_schema
+        - completion_tags_selector
+      properties:
+        id:
+          type: string
+          description: The unique identifier for this task.
+        created_at_ms:
+          type: number
+          description: The epoch this task was created.
+        json_schema:
+          type: object
+          description: The schema of the task. Must be valid JSON Schema.
+        name:
+          type: string
+          description: The name of the task.
+        instruction:
+          type: string
+          description: The instructions for this task.
+        completion_tags_selector:
+          type: array
+          description: The completion tags that match this task, i.e. completions surfaced as needing feedback.
+          items:
+            type: string
+    Feedback:
+      type: object
+      required:
+        - task_id
+        - json_values
+        - matched_completion_ids
+        - comment
+      properties:
+        id:
+          type: string
+          description: The unique identifier for this feedback.
+        created_at_ms:
+          type: number
+          description: The epoch this feedback was created.
+        task_id:
+          type: string
+          description: The unique identifier for the task associated with this feedback.
+        json_values:
+          type: object
+          description: The values of the feedback. Must be valid JSON according to the task schema.
+        matched_completion_ids:
+          type: array
+          description: The matched completion ids associated with this feedback.
+          items:
+            type: string
+        comment:
+          type: string
+          description: The comment associated with this feedback.
+        completions_summary:
+          type: string
+    Completion:
+      type: object
+      required:
+        - organization_id
+      properties:
+        id:
+          type: string
+          description: The unique identifier for this completion.
+        organization_id:
+          type: string
+          description: The unique identifier for the organization.
+        kind:
+          type: string
+          description: The kind of completion, i.e. chat messages or prompt.
+          enum:
+            - chat
+            - prompt
+        status:
+          type: string
+          description: The status of this completion.
+          enum:
+            - started
+            - finished
+            - failed
+        tags:
+          type: array
+          description: The tags for this completion.
+          items:
+            type: string
+        request:
+          type: object
+          $ref: "openai.yaml#/components/schemas/CreateChatCompletionRequest"
+        response:
+          type: object
+          $ref: "openai.yaml#/components/schemas/CreateChatCompletionResponse"
+        stacktrace:
+          type: array
+          description: The stacktrace for this completion.
+          items:
+            type: object
+            properties:
+              file:
+                type: string
+                description: The file associated with this stacktrace.
+              line:
+                type: string
+                description: The line associated with this stacktrace.
+              lineno:
+                type: number
+                description: The line number associated with this stacktrace.
+              name:
+                type: string
+                description: The function or module associated with this stacktrace.
+            required:
+              - file
+              - line
+              - lineno
+              - name
+        session_id:
+          type: string
+          description: The session id for this completion.
+        duration:
+          type: number
+          description: The duration of this completion in seconds.
+        failure_kind:
+          type: string
+          description: The failure kind of this completion.
+        failure_reason:
+          type: string
+          description: The failure reason of this completion.
+    Session:
+      type: object
+      properties:
+        id:
+          type: string
+          description: The unique identifier for this session.
+
+paths:
+  /api/v1/completions:
+    post:
+      tags:
+        - Completions
+      operationId: create
+      summary: Create a completion
+      security:
+        - Log10Token: []
+      parameters:
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/Completion"
+      responses:
+        "200":
+          description: Created
+          content:
+            application/json:
+              schema:
+                type: object
+                x-speakeasy-type-override: any
+        "201":
+          description: Created
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Completion"
+
+  /api/v1/completions/{completionId}:
+    post:
+      parameters:
+        - name: completionId
+          in: path
+          required: true
+          description: The completion id to update.
+          schema:
+            type: string
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      tags:
+        - Completions
+      operationId: update
+      summary: Update completion by id.
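+      # Illustrative note (not part of the spec): a request body for logging or
+      # updating a completion might look like this in Python (all values are made
+      # up; `request`/`response` would follow the OpenAI chat schemas referenced
+      # above):
+      #
+      #   completion = {
+      #       "organization_id": "org-123",
+      #       "kind": "chat",
+      #       "status": "finished",
+      #       "tags": ["prod", "summarizer"],
+      #       "session_id": "session-456",
+      #       "duration": 1.2,
+      #   }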
+      security:
+        - Log10Token: []
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              $ref: "#/components/schemas/Completion"
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Completion"
+
+  /api/v1/sessions:
+    post:
+      tags:
+        - Sessions
+      x-speakeasy-name-override: create
+      x-speakeasy-usage-example: true
+      operationId: createSession
+      summary: Create a session
+      security:
+        - Log10Token: []
+      parameters:
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      responses:
+        "201":
+          description: Created
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  session:
+                    $ref: "#/components/schemas/Session"
+
+  /api/v1/completions/ungraded:
+    get:
+      tags:
+        - Completions
+      operationId: listUngraded
+      summary: List ungraded completions, i.e. completions that have not been associated with feedback but match the task selector.
+      security:
+        - Log10Token: []
+      parameters:
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  completions:
+                    type: array
+                    items:
+                      $ref: "#/components/schemas/Completion"
+
+  /api/v1/feedback/{feedbackId}:
+    get:
+      tags:
+        - Feedback
+      parameters:
+        - name: feedbackId
+          in: path
+          required: true
+          description: The feedback id to fetch.
+          schema:
+            type: string
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      operationId: get
+      summary: Fetch feedback by id.
+      security:
+        - Log10Token: []
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Feedback"
+
+  /api/v1/feedback:
+    get:
+      tags:
+        - Feedback
+      operationId: list
+      summary: List feedback
+      security:
+        - Log10Token: []
+      parameters:
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      requestBody:
+        content:
+          application/json:
+            schema:
+              type: object
+              properties:
+                offset:
+                  type: integer
+                  description: The offset to start fetching feedback from.
+                limit:
+                  type: integer
+                  description: The number of feedback items to fetch.
+                completion_id:
+                  type: string
+                  description: The completion id to fetch feedback for.
+                task_id:
+                  type: string
+                  description: The task id to fetch feedback for.
+      responses:
+        "200":
+          description: OK
+          content:
+            application/json:
+              schema:
+                type: object
+                properties:
+                  feedback:
+                    type: array
+                    items:
+                      $ref: "#/components/schemas/Feedback"
+    post:
+      summary: Upload a piece of feedback
+      tags:
+        - Feedback
+      operationId: upload
+      security:
+        - Log10Token: []
+      parameters:
+        - name: X-Log10-Organization
+          in: header
+          required: true
+          schema:
+            type: string
+      requestBody:
+        required: true
+        content:
+          application/json:
+            schema:
+              oneOf:
+                - allOf:
+                    - $ref: "#/components/schemas/Feedback"
+                    - type: object
+                      required:
+                        - allow_unmatched_feedback
+                        - completion_tags_selector
+                      properties:
+                        allow_unmatched_feedback:
+                          type: boolean
+                          description: Whether to allow unmatched feedback. Defaults to false.
+                          default: false
+                        max_matched_completions:
+                          type: integer
+                          description: The maximum number of matched completions. Returns an error if exceeded. Defaults to 100.
+                          default: 100
+                        completion_tags_selector:
+                          type: array
+                          description: The completion tags associated with this feedback.
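+                        # Illustrative note (not part of the spec): uploading feedback by
+                        # tag selector might look like this with Python `requests` (token,
+                        # org id, and all values are made up):
+                        #
+                        #   import requests
+                        #   requests.post(
+                        #       "https://log10.io/api/v1/feedback",
+                        #       headers={"X-Log10-Token": "...",
+                        #                "X-Log10-Organization": "org-123"},
+                        #       json={"task_id": "task-789", "json_values": {"score": 5},
+                        #             "matched_completion_ids": [], "comment": "Great answer",
+                        #             "allow_unmatched_feedback": False,
+                        #             "completion_tags_selector": ["prod", "summarizer"]},
+                        #   )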
+ items: + type: string + - allOf: + - $ref: "#/components/schemas/Feedback" + - type: object + required: + - completion_ids + properties: + completion_ids: + type: array + description: The completion ids to associate with this feedback. + items: + type: string + + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Feedback" + + /api/v1/feedback_task: + get: + tags: + - FeedbackTasks + x-speakeasy-name-override: list + operationId: listFeedbackTasks + summary: List feedback tasks. + responses: + "200": + description: OK + content: + application/json: + schema: + type: array + items: + $ref: "#/components/schemas/Task" + post: + tags: + - FeedbackTasks + x-speakeasy-name-override: create + operationId: createFeedbackTask + summary: Create a new task. + requestBody: + content: + application/json: + schema: + $ref: "#/components/schemas/Task" + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Task" + /api/v1/feedback_task/{taskId}: + get: + parameters: + - name: taskId + in: path + required: true + description: The task id to fetch. + schema: + type: string + tags: + - FeedbackTasks + x-speakeasy-name-override: get + operationId: getFeedbackTask + summary: Retrieves feedback task `taskId`. + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: "#/components/schemas/Task" \ No newline at end of file diff --git a/py.typed b/py.typed new file mode 100644 index 0000000..3e38f1a --- /dev/null +++ b/py.typed @@ -0,0 +1 @@ +# Marker file for PEP 561. The package enables type hints. diff --git a/pylintrc b/pylintrc new file mode 100644 index 0000000..7995b87 --- /dev/null +++ b/pylintrc @@ -0,0 +1,647 @@ +[MAIN] + +# Analyse import fallback blocks. This can be used to support both Python 2 and +# 3 compatible code, which means that the block might have code that exists +# only in one or another interpreter, leading to false positives when analysed. +analyse-fallback-blocks=no + +# Clear in-memory caches upon conclusion of linting. Useful if running pylint +# in a server-like mode. +clear-cache-post-run=no + +# Load and enable all available extensions. Use --list-extensions to see a list +# all available extensions. +#enable-all-extensions= + +# In error mode, messages with a category besides ERROR or FATAL are +# suppressed, and no reports are done by default. Error mode is compatible with +# disabling specific errors. +#errors-only= + +# Always return a 0 (non-error) status code, even if lint errors are found. +# This is primarily useful in continuous integration scripts. +#exit-zero= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. +extension-pkg-allow-list= + +# A comma-separated list of package or module names from where C extensions may +# be loaded. Extensions are loading into the active Python interpreter and may +# run arbitrary code. (This is an alternative name to extension-pkg-allow-list +# for backward compatibility.) +extension-pkg-whitelist= + +# Return non-zero exit code if any of these messages/categories are detected, +# even if score is above --fail-under value. Syntax same as enable. Messages +# specified are enabled, while categories only check already-enabled messages. +fail-on= + +# Specify a score threshold under which the program will exit with error. 
+fail-under=10 + +# Interpret the stdin as a python script, whose filename needs to be passed as +# the module_or_package argument. +#from-stdin= + +# Files or directories to be skipped. They should be base names, not paths. +ignore=CVS + +# Add files or directories matching the regular expressions patterns to the +# ignore-list. The regex matches against paths and can be in Posix or Windows +# format. Because '\\' represents the directory delimiter on Windows systems, +# it can't be used as an escape character. +ignore-paths= + +# Files or directories matching the regular expression patterns are skipped. +# The regex matches against base names, not paths. The default value ignores +# Emacs file locks +ignore-patterns=^\.# + +# List of module names for which member attributes should not be checked +# (useful for modules/projects where namespaces are manipulated during runtime +# and thus existing member attributes cannot be deduced by static analysis). It +# supports qualified module names, as well as Unix pattern matching. +ignored-modules= + +# Python code to execute, usually for sys.path manipulation such as +# pygtk.require(). +#init-hook= + +# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the +# number of processors available to use, and will cap the count on Windows to +# avoid hangs. +jobs=1 + +# Control the amount of potential inferred values when inferring a single +# object. This can help the performance when dealing with large functions or +# complex, nested conditions. +limit-inference-results=100 + +# List of plugins (as comma separated values of python module names) to load, +# usually to register additional checkers. +load-plugins= + +# Pickle collected data for later comparisons. +persistent=yes + +# Minimum Python version to use for version dependent checks. Will default to +# the version used to run pylint. +py-version=3.8 + +# Discover python modules and packages in the file system subtree. +recursive=no + +# When enabled, pylint would attempt to guess common misconfiguration and emit +# user-friendly hints instead of false-positive error messages. +suggestion-mode=yes + +# Allow loading of arbitrary C extensions. Extensions are imported into the +# active Python interpreter and may run arbitrary code. +unsafe-load-any-extension=no + +# In verbose mode, extra non-checker-related info will be displayed. +#verbose= + + +[BASIC] + +# Naming style matching correct argument names. +argument-naming-style=snake_case + +# Regular expression matching correct argument names. Overrides argument- +# naming-style. If left empty, argument names will be checked with the set +# naming style. +#argument-rgx= + +# Naming style matching correct attribute names. +#attr-naming-style=snake_case + +# Regular expression matching correct attribute names. Overrides attr-naming- +# style. If left empty, attribute names will be checked with the set naming +# style. +attr-rgx=[^\W\d][^\W]*|__.*__$ + +# Bad variable names which should always be refused, separated by a comma. +bad-names= + +# Bad variable names regexes, separated by a comma. If names match any regex, +# they will always be refused +bad-names-rgxs= + +# Naming style matching correct class attribute names. +class-attribute-naming-style=any + +# Regular expression matching correct class attribute names. Overrides class- +# attribute-naming-style. If left empty, class attribute names will be checked +# with the set naming style. +#class-attribute-rgx= + +# Naming style matching correct class constant names. 
+class-const-naming-style=UPPER_CASE + +# Regular expression matching correct class constant names. Overrides class- +# const-naming-style. If left empty, class constant names will be checked with +# the set naming style. +#class-const-rgx= + +# Naming style matching correct class names. +class-naming-style=PascalCase + +# Regular expression matching correct class names. Overrides class-naming- +# style. If left empty, class names will be checked with the set naming style. +#class-rgx= + +# Naming style matching correct constant names. +const-naming-style=UPPER_CASE + +# Regular expression matching correct constant names. Overrides const-naming- +# style. If left empty, constant names will be checked with the set naming +# style. +#const-rgx= + +# Minimum line length for functions/classes that require docstrings, shorter +# ones are exempt. +docstring-min-length=-1 + +# Naming style matching correct function names. +function-naming-style=snake_case + +# Regular expression matching correct function names. Overrides function- +# naming-style. If left empty, function names will be checked with the set +# naming style. +#function-rgx= + +# Good variable names which should always be accepted, separated by a comma. +good-names=i, + j, + k, + ex, + Run, + _, + e, + id, + n + +# Good variable names regexes, separated by a comma. If names match any regex, +# they will always be accepted +good-names-rgxs= + +# Include a hint for the correct naming format with invalid-name. +include-naming-hint=no + +# Naming style matching correct inline iteration names. +inlinevar-naming-style=any + +# Regular expression matching correct inline iteration names. Overrides +# inlinevar-naming-style. If left empty, inline iteration names will be checked +# with the set naming style. +#inlinevar-rgx= + +# Naming style matching correct method names. +method-naming-style=snake_case + +# Regular expression matching correct method names. Overrides method-naming- +# style. If left empty, method names will be checked with the set naming style. +#method-rgx= + +# Naming style matching correct module names. +module-naming-style=snake_case + +# Regular expression matching correct module names. Overrides module-naming- +# style. If left empty, module names will be checked with the set naming style. +#module-rgx= + +# Colon-delimited sets of names that determine each other's naming style when +# the name regexes allow several styles. +name-group= + +# Regular expression which should only match function or class names that do +# not require a docstring. +no-docstring-rgx=^_ + +# List of decorators that produce properties, such as abc.abstractproperty. Add +# to this list to register other decorators that produce valid properties. +# These decorators are taken in consideration only for invalid-name. +property-classes=abc.abstractproperty + +# Regular expression matching correct type variable names. If left empty, type +# variable names will be checked with the set naming style. +#typevar-rgx= + +# Naming style matching correct variable names. +variable-naming-style=snake_case + +# Regular expression matching correct variable names. Overrides variable- +# naming-style. If left empty, variable names will be checked with the set +# naming style. +#variable-rgx= + + +[CLASSES] + +# Warn about protected attribute access inside special methods +check-protected-access-in-special-methods=no + +# List of method names used to declare (i.e. assign) instance attributes. 
+defining-attr-methods=__init__, + __new__, + setUp, + __post_init__ + +# List of member names, which should be excluded from the protected access +# warning. +exclude-protected=_asdict, + _fields, + _replace, + _source, + _make + +# List of valid names for the first argument in a class method. +valid-classmethod-first-arg=cls + +# List of valid names for the first argument in a metaclass class method. +valid-metaclass-classmethod-first-arg=mcs + + +[DESIGN] + +# List of regular expressions of class ancestor names to ignore when counting +# public methods (see R0903) +exclude-too-few-public-methods= + +# List of qualified class names to ignore when counting class parents (see +# R0901) +ignored-parents= + +# Maximum number of arguments for function / method. +max-args=5 + +# Maximum number of attributes for a class (see R0902). +max-attributes=7 + +# Maximum number of boolean expressions in an if statement (see R0916). +max-bool-expr=5 + +# Maximum number of branch for function / method body. +max-branches=12 + +# Maximum number of locals for function / method body. +max-locals=15 + +# Maximum number of parents for a class (see R0901). +max-parents=7 + +# Maximum number of public methods for a class (see R0904). +max-public-methods=25 + +# Maximum number of return / yield for function / method body. +max-returns=6 + +# Maximum number of statements in function / method body. +max-statements=50 + +# Minimum number of public methods for a class (see R0903). +min-public-methods=2 + + +[EXCEPTIONS] + +# Exceptions that will emit a warning when caught. +overgeneral-exceptions=builtins.BaseException,builtins.Exception + + +[FORMAT] + +# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. +expected-line-ending-format= + +# Regexp for a line that is allowed to be longer than the limit. +ignore-long-lines=^\s*(# )?<?https?://\S+>?$ + +# Number of spaces of indent required inside a hanging or continued line. +indent-after-paren=4 + +# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 +# tab). +indent-string=' ' + +# Maximum number of characters on a single line. +max-line-length=100 + +# Maximum number of lines in a module. +max-module-lines=1000 + +# Allow the body of a class to be on the same line as the declaration if body +# contains single statement. +single-line-class-stmt=no + +# Allow the body of an if to be on the same line as the test if there is no +# else. +single-line-if-stmt=no + + +[IMPORTS] + +# List of modules that can be imported at any level, not just the top level +# one. +allow-any-import-level= + +# Allow explicit reexports by alias from a package __init__. +allow-reexport-from-package=no + +# Allow wildcard imports from modules that define __all__. +allow-wildcard-with-all=no + +# Deprecated modules which should not be used, separated by a comma. +deprecated-modules= + +# Output a graph (.gv or any supported image format) of external dependencies +# to the given file (report RP0402 must not be disabled). +ext-import-graph= + +# Output a graph (.gv or any supported image format) of all (i.e. internal and +# external) dependencies to the given file (report RP0402 must not be +# disabled). +import-graph= + +# Output a graph (.gv or any supported image format) of internal dependencies +# to the given file (report RP0402 must not be disabled). +int-import-graph= + +# Force import order to recognize a module as part of the standard +# compatibility libraries.
+known-standard-library= + +# Force import order to recognize a module as part of a third party library. +known-third-party=enchant + +# Couples of modules and preferred modules, separated by a comma. +preferred-modules= + + +[LOGGING] + +# The type of string formatting that logging methods do. `old` means using % +# formatting, `new` is for `{}` formatting. +logging-format-style=old + +# Logging modules to check that the string format arguments are in logging +# function parameter format. +logging-modules=logging + + +[MESSAGES CONTROL] + +# Only show warnings with the listed confidence levels. Leave empty to show +# all. Valid levels: HIGH, CONTROL_FLOW, INFERENCE, INFERENCE_FAILURE, +# UNDEFINED. +confidence=HIGH, + CONTROL_FLOW, + INFERENCE, + INFERENCE_FAILURE, + UNDEFINED + +# Disable the message, report, category or checker with the given id(s). You +# can either give multiple identifiers separated by comma (,) or put this +# option multiple times (only on the command line, not in the configuration +# file where it should appear only once). You can also use "--disable=all" to +# disable everything first and then re-enable specific checks. For example, if +# you want to run only the similarities checker, you can use "--disable=all +# --enable=similarities". If you want to run only the classes checker, but have +# no Warning level messages displayed, use "--disable=all --enable=classes +# --disable=W". +disable=raw-checker-failed, + bad-inline-option, + locally-disabled, + file-ignored, + suppressed-message, + useless-suppression, + deprecated-pragma, + use-symbolic-message-instead, + trailing-whitespace, + line-too-long, + missing-class-docstring, + missing-module-docstring, + missing-function-docstring, + too-many-instance-attributes, + wrong-import-order, + too-many-arguments, + broad-exception-raised, + too-few-public-methods, + too-many-branches, + chained-comparison, + duplicate-code, + trailing-newlines, + too-many-public-methods, + too-many-locals, + too-many-lines, + using-constant-test, + too-many-statements, + cyclic-import, + too-many-nested-blocks, + too-many-boolean-expressions, + no-else-raise, + bare-except, + broad-exception-caught + +# Enable the message, report, category or checker with the given id(s). You can +# either give multiple identifier separated by comma (,) or put this option +# multiple time (only on the command line, not in the configuration file where +# it should appear only once). See also the "--disable" option for examples. +enable=c-extension-no-member + + +[METHOD_ARGS] + +# List of qualified names (i.e., library.method) which require a timeout +# parameter e.g. 'requests.api.get,requests.api.post' +timeout-methods=requests.api.delete,requests.api.get,requests.api.head,requests.api.options,requests.api.patch,requests.api.post,requests.api.put,requests.api.request + + +[MISCELLANEOUS] + +# List of note tags to take in consideration, separated by a comma. +notes=FIXME, + XXX, + TODO + +# Regular expression of note tags to take in consideration. +notes-rgx= + + +[REFACTORING] + +# Maximum number of nested blocks for function / method body +max-nested-blocks=5 + +# Complete name of functions that never returns. When checking for +# inconsistent-return-statements if a never returning function is called then +# it will be considered as an explicit return statement and no message will be +# printed. +never-returning-functions=sys.exit,argparse.parse_error + + +[REPORTS] + +# Python expression which should return a score less than or equal to 10. 
You +# have access to the variables 'fatal', 'error', 'warning', 'refactor', +# 'convention', and 'info' which contain the number of messages in each +# category, as well as 'statement' which is the total number of statements +# analyzed. This score is used by the global evaluation report (RP0004). +evaluation=max(0, 0 if fatal else 10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)) + +# Template used to display messages. This is a python new-style format string +# used to format the message information. See doc for all details. +msg-template= + +# Set the output format. Available formats are text, parseable, colorized, json +# and msvs (visual studio). You can also give a reporter class, e.g. +# mypackage.mymodule.MyReporterClass. +#output-format= + +# Tells whether to display a full report or only the messages. +reports=no + +# Activate the evaluation score. +score=yes + + +[SIMILARITIES] + +# Comments are removed from the similarity computation +ignore-comments=yes + +# Docstrings are removed from the similarity computation +ignore-docstrings=yes + +# Imports are removed from the similarity computation +ignore-imports=yes + +# Signatures are removed from the similarity computation +ignore-signatures=yes + +# Minimum lines number of a similarity. +min-similarity-lines=4 + + +[SPELLING] + +# Limits count of emitted suggestions for spelling mistakes. +max-spelling-suggestions=4 + +# Spelling dictionary name. Available dictionaries: none. To make it work, +# install the 'python-enchant' package. +spelling-dict= + +# List of comma separated words that should be considered directives if they +# appear at the beginning of a comment and should not be checked. +spelling-ignore-comment-directives=fmt: on,fmt: off,noqa:,noqa,nosec,isort:skip,mypy: + +# List of comma separated words that should not be checked. +spelling-ignore-words= + +# A path to a file that contains the private dictionary; one word per line. +spelling-private-dict-file= + +# Tells whether to store unknown words to the private dictionary (see the +# --spelling-private-dict-file option) instead of raising a message. +spelling-store-unknown-words=no + + +[STRING] + +# This flag controls whether inconsistent-quotes generates a warning when the +# character used as a quote delimiter is used inconsistently within a module. +check-quote-consistency=no + +# This flag controls whether the implicit-str-concat should generate a warning +# on implicit string concatenation in sequences defined over several lines. +check-str-concat-over-line-jumps=no + + +[TYPECHECK] + +# List of decorators that produce context managers, such as +# contextlib.contextmanager. Add to this list to register other decorators that +# produce valid context managers. +contextmanager-decorators=contextlib.contextmanager + +# List of members which are set dynamically and missed by pylint inference +# system, and so shouldn't trigger E1101 when accessed. Python regular +# expressions are accepted. +generated-members= + +# Tells whether to warn about missing members when the owner of the attribute +# is inferred to be None. +ignore-none=yes + +# This flag controls whether pylint should warn about no-member and similar +# checks whenever an opaque object is returned when inferring. The inference +# can return multiple potential results while evaluating a Python object, but +# some branches might not be evaluated, which results in partial inference. 
In +# that case, it might be useful to still emit no-member and other checks for +# the rest of the inferred objects. +ignore-on-opaque-inference=yes + +# List of symbolic message names to ignore for Mixin members. +ignored-checks-for-mixins=no-member, + not-async-context-manager, + not-context-manager, + attribute-defined-outside-init + +# List of class names for which member attributes should not be checked (useful +# for classes with dynamically set attributes). This supports the use of +# qualified names. +ignored-classes=optparse.Values,thread._local,_thread._local,argparse.Namespace + +# Show a hint with possible names when a member name was not found. The aspect +# of finding the hint is based on edit distance. +missing-member-hint=yes + +# The minimum edit distance a name should have in order to be considered a +# similar match for a missing member name. +missing-member-hint-distance=1 + +# The total number of similar names that should be taken in consideration when +# showing a hint for a missing member. +missing-member-max-choices=1 + +# Regex pattern to define which classes are considered mixins. +mixin-class-rgx=.*[Mm]ixin + +# List of decorators that change the signature of a decorated function. +signature-mutators= + + +[VARIABLES] + +# List of additional names supposed to be defined in builtins. Remember that +# you should avoid defining new builtins when possible. +additional-builtins= + +# Tells whether unused global variables should be treated as a violation. +allow-global-unused-variables=yes + +# List of names allowed to shadow builtins +allowed-redefined-builtins=id,object + +# List of strings which can identify a callback function by name. A callback +# name must start or end with one of those strings. +callbacks=cb_, + _cb + +# A regular expression matching the name of dummy variables (i.e. expected to +# not be used). +dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ + +# Argument names that match this expression will be ignored. +ignored-argument-names=_.*|^ignored_|^unused_ + +# Tells whether we should check for unused import in __init__ files. +init-import=no + +# List of qualified module names which can have objects that can redefine +# builtins. +redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..6e61d31 --- /dev/null +++ b/setup.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
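+
+Not generated: for local development, `pip install -e '.[dev]'` gives an
+editable install with the pylint dev extra declared below; the distribution
+name is `log10py` per the `name=` field.
+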
DO NOT EDIT.""" + +import setuptools + +try: + with open('README.md', 'r') as fh: + long_description = fh.read() +except FileNotFoundError: + long_description = '' + +setuptools.setup( + name='log10py', + version='0.0.1', + author='Speakeasy', + description='Python Client SDK Generated by Speakeasy', + long_description=long_description, + long_description_content_type='text/markdown', + packages=setuptools.find_packages(where='src'), + install_requires=[ + "certifi>=2023.7.22", + "charset-normalizer>=3.2.0", + "dataclasses-json>=0.6.4", + "idna>=3.4", + "jsonpath-python>=1.0.6", + "marshmallow>=3.19.0", + "mypy-extensions>=1.0.0", + "packaging>=23.1", + "python-dateutil>=2.8.2", + "requests>=2.31.0", + "six>=1.16.0", + "typing-inspect>=0.9.0", + "typing_extensions>=4.7.1", + "urllib3>=1.26.18", + ], + extras_require={ + "dev": [ + "pylint==3.1.0", + ], + }, + package_dir={'': 'src'}, + python_requires='>=3.8', + package_data={ + 'log10py': ['py.typed'] + }, +) diff --git a/src/log10/__init__.py b/src/log10/__init__.py new file mode 100644 index 0000000..e6c0dee --- /dev/null +++ b/src/log10/__init__.py @@ -0,0 +1,4 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .sdk import * +from .sdkconfiguration import * diff --git a/src/log10/_hooks/__init__.py b/src/log10/_hooks/__init__.py new file mode 100644 index 0000000..b2ab14b --- /dev/null +++ b/src/log10/_hooks/__init__.py @@ -0,0 +1,4 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .sdkhooks import * +from .types import * diff --git a/src/log10/_hooks/sdkhooks.py b/src/log10/_hooks/sdkhooks.py new file mode 100644 index 0000000..1bd70b2 --- /dev/null +++ b/src/log10/_hooks/sdkhooks.py @@ -0,0 +1,55 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
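+
+Dispatch sketch (illustrative, not generated; `LoggingHook` stands in for any
+concrete `BeforeRequestHook` implementation from `.types`):
+
+    hooks = SDKHooks()
+    hooks.register_before_request_hook(LoggingHook())
+    # before_request() runs each registered hook in order; a hook may return a
+    # replacement PreparedRequest, or an Exception, which is raised here.
+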
DO NOT EDIT.""" + +import requests +from .types import SDKInitHook, BeforeRequestContext, BeforeRequestHook, AfterSuccessContext, AfterSuccessHook, AfterErrorContext, AfterErrorHook, Hooks +from typing import List, Optional, Tuple + + +class SDKHooks(Hooks): + def __init__(self): + self.sdk_init_hooks: List[SDKInitHook] = [] + self.before_request_hooks: List[BeforeRequestHook] = [] + self.after_success_hooks: List[AfterSuccessHook] = [] + self.after_error_hooks: List[AfterErrorHook] = [] + + def register_sdk_init_hook(self, hook: SDKInitHook) -> None: + self.sdk_init_hooks.append(hook) + + def register_before_request_hook(self, hook: BeforeRequestHook) -> None: + self.before_request_hooks.append(hook) + + def register_after_success_hook(self, hook: AfterSuccessHook) -> None: + self.after_success_hooks.append(hook) + + def register_after_error_hook(self, hook: AfterErrorHook) -> None: + self.after_error_hooks.append(hook) + + def sdk_init(self, base_url: str, client: requests.Session) -> Tuple[str, requests.Session]: + for hook in self.sdk_init_hooks: + base_url, client = hook.sdk_init(base_url, client) + return base_url, client + + def before_request(self, hook_ctx: BeforeRequestContext, request: requests.PreparedRequest) -> requests.PreparedRequest: + for hook in self.before_request_hooks: + out = hook.before_request(hook_ctx, request) + if isinstance(out, Exception): + raise out + request = out + + return request + + def after_success(self, hook_ctx: AfterSuccessContext, response: requests.Response) -> requests.Response: + for hook in self.after_success_hooks: + out = hook.after_success(hook_ctx, response) + if isinstance(out, Exception): + raise out + response = out + return response + + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[requests.Response], error: Optional[Exception]) -> Tuple[Optional[requests.Response], Optional[Exception]]: + for hook in self.after_error_hooks: + result = hook.after_error(hook_ctx, response, error) + if isinstance(result, Exception): + raise result + response, error = result + return response, error diff --git a/src/log10/_hooks/types.py b/src/log10/_hooks/types.py new file mode 100644 index 0000000..b24c141 --- /dev/null +++ b/src/log10/_hooks/types.py @@ -0,0 +1,74 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
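+
+Sketch of a concrete hook (illustrative, not generated; the header name is
+made up):
+
+    class OperationIDHeaderHook(BeforeRequestHook):
+        def before_request(self, hook_ctx, request):
+            # PreparedRequest headers are mutable; returning an Exception
+            # instead of the request aborts the call.
+            request.headers['x-operation-id'] = hook_ctx.operation_id
+            return request
+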
DO NOT EDIT.""" + +import requests as requests_http +from abc import ABC, abstractmethod +from typing import Any, Callable, List, Optional, Tuple, Union + + +class HookContext: + operation_id: str + oauth2_scopes: Optional[List[str]] = None + security_source: Optional[Union[Any, Callable[[], Any]]] = None + + def __init__(self, operation_id: str, oauth2_scopes: Optional[List[str]], security_source: Optional[Union[Any, Callable[[], Any]]]): + self.operation_id = operation_id + self.oauth2_scopes = oauth2_scopes + self.security_source = security_source + + +class BeforeRequestContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class AfterSuccessContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + + +class AfterErrorContext(HookContext): + def __init__(self, hook_ctx: HookContext): + super().__init__(hook_ctx.operation_id, hook_ctx.oauth2_scopes, hook_ctx.security_source) + + +class SDKInitHook(ABC): + @abstractmethod + def sdk_init(self, base_url: str, client: requests_http.Session) -> Tuple[str, requests_http.Session]: + pass + + +class BeforeRequestHook(ABC): + @abstractmethod + def before_request(self, hook_ctx: BeforeRequestContext, request: requests_http.PreparedRequest) -> Union[requests_http.PreparedRequest, Exception]: + pass + + +class AfterSuccessHook(ABC): + @abstractmethod + def after_success(self, hook_ctx: AfterSuccessContext, response: requests_http.Response) -> Union[requests_http.Response, Exception]: + pass + + +class AfterErrorHook(ABC): + @abstractmethod + def after_error(self, hook_ctx: AfterErrorContext, response: Optional[requests_http.Response], error: Optional[Exception]) -> Union[Tuple[Optional[requests_http.Response], Optional[Exception]], Exception]: + pass + + +class Hooks(ABC): + @abstractmethod + def register_sdk_init_hook(self, hook: SDKInitHook): + pass + + @abstractmethod + def register_before_request_hook(self, hook: BeforeRequestHook): + pass + + @abstractmethod + def register_after_success_hook(self, hook: AfterSuccessHook): + pass + + @abstractmethod + def register_after_error_hook(self, hook: AfterErrorHook): + pass diff --git a/src/log10/completions.py b/src/log10/completions.py new file mode 100644 index 0000000..388cac6 --- /dev/null +++ b/src/log10/completions.py @@ -0,0 +1,234 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
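+
+Usage sketch (illustrative, not generated): the root client name `Log10` and
+its constructor arguments are assumptions based on the Security and globals
+components; see USAGE.md for the generated form. Completion fields are elided.
+
+    import log10
+    from log10.models import components
+
+    sdk = log10.Log10(log10_token='<token>', x_log10_organization='<org-id>')
+    res = sdk.completions.create(completion=components.Completion(...))
+    print(res.completion or res.any)  # a 201 fills .completion, a 200 fills .any
+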
DO NOT EDIT.""" + +import requests as requests_http +from .sdkconfiguration import SDKConfiguration +from log10 import utils +from log10._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext, HookContext +from log10.models import components, errors, operations +from typing import Any, Optional + +class Completions: + r"""Completions""" + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + + + def create(self, completion: components.Completion, x_log10_organization: Optional[str] = None) -> operations.CreateResponse: + r"""Create a completion""" + hook_ctx = HookContext(operation_id='create', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.CreateRequest( + x_log10_organization=x_log10_organization, + completion=completion, + ) + + _globals = operations.CreateGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/completions', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + req_content_type, data, form = utils.serialize_request_body(request, operations.CreateRequest, "completion", False, False, 'json') + if req_content_type is not None and req_content_type not in ('multipart/form-data', 'multipart/mixed'): + headers['content-type'] = req_content_type + if data is None and form is None: + raise Exception('request body is required') + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('POST', url, params=query_params, data=data, files=form, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.CreateResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[Any]) + res.any = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code == 201: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = 
utils.unmarshal_json(http_res.text, Optional[components.Completion]) + res.completion = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + + def update(self, completion_id: str, completion: components.Completion, x_log10_organization: Optional[str] = None) -> operations.UpdateResponse: + r"""Update completion by id.""" + hook_ctx = HookContext(operation_id='update', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.UpdateRequest( + completion_id=completion_id, + x_log10_organization=x_log10_organization, + completion=completion, + ) + + _globals = operations.UpdateGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/completions/{completionId}', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + req_content_type, data, form = utils.serialize_request_body(request, operations.UpdateRequest, "completion", False, False, 'json') + if req_content_type is not None and req_content_type not in ('multipart/form-data', 'multipart/mixed'): + headers['content-type'] = req_content_type + if data is None and form is None: + raise Exception('request body is required') + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('POST', url, params=query_params, data=data, files=form, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.UpdateResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[components.Completion]) + res.completion = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, 
http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + + def list_ungraded(self, x_log10_organization: Optional[str] = None) -> operations.ListUngradedResponse: + r"""List ungraded completions, i.e. completions that have not been associated with feedback but match the task selector.""" + hook_ctx = HookContext(operation_id='listUngraded', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.ListUngradedRequest( + x_log10_organization=x_log10_organization, + ) + + _globals = operations.ListUngradedGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/completions/ungraded', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('GET', url, params=query_params, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.ListUngradedResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[operations.ListUngradedResponseBody]) + res.object = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + diff --git a/src/log10/feedback.py b/src/log10/feedback.py new file mode 100644 index 0000000..a86118b --- /dev/null +++ b/src/log10/feedback.py @@ -0,0 +1,224 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev).
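+
+Usage sketch (illustrative, not generated; `sdk` is a configured root client
+and the Feedback fields inside the request bodies are elided):
+
+    from log10.models import operations
+
+    # upload() expects one of the two spec shapes: completion_tags_selector
+    # (with allow_unmatched_feedback) or completion_ids.
+    res = sdk.feedback.upload(request_body=operations.UploadRequestBody(...))
+    page = sdk.feedback.list(request_body=operations.ListRequestBody(limit=10))
+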
DO NOT EDIT.""" + +import requests as requests_http +from .sdkconfiguration import SDKConfiguration +from log10 import utils +from log10._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext, HookContext +from log10.models import components, errors, operations +from typing import Optional + +class Feedback: + r"""Feedback""" + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + + + def get(self, feedback_id: str, x_log10_organization: Optional[str] = None) -> operations.GetResponse: + r"""Fetch feedback by id.""" + hook_ctx = HookContext(operation_id='get', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.GetRequest( + feedback_id=feedback_id, + x_log10_organization=x_log10_organization, + ) + + _globals = operations.GetGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/feedback/{feedbackId}', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('GET', url, params=query_params, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.GetResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[components.Feedback]) + res.feedback = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + + def list(self, x_log10_organization: Optional[str] = None, request_body: Optional[operations.ListRequestBody] = None) -> operations.ListResponse: + r"""List feedback""" + hook_ctx = HookContext(operation_id='list', oauth2_scopes=[], 
security_source=self.sdk_configuration.security) + request = operations.ListRequest( + x_log10_organization=x_log10_organization, + request_body=request_body, + ) + + _globals = operations.ListGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/feedback', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + req_content_type, data, form = utils.serialize_request_body(request, operations.ListRequest, "request_body", False, True, 'json') + if req_content_type is not None and req_content_type not in ('multipart/form-data', 'multipart/mixed'): + headers['content-type'] = req_content_type + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('GET', url, params=query_params, data=data, files=form, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.ListResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[operations.ListResponseBody]) + res.object = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + + def upload(self, request_body: operations.UploadRequestBody, x_log10_organization: Optional[str] = None) -> operations.UploadResponse: + r"""Upload a piece of feedback""" + hook_ctx = HookContext(operation_id='upload', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.UploadRequest( + x_log10_organization=x_log10_organization, + request_body=request_body, + ) + + _globals = operations.UploadGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = 
utils.generate_url(base_url, '/api/v1/feedback', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + req_content_type, data, form = utils.serialize_request_body(request, operations.UploadRequest, "request_body", False, False, 'json') + if req_content_type is not None and req_content_type not in ('multipart/form-data', 'multipart/mixed'): + headers['content-type'] = req_content_type + if data is None and form is None: + raise Exception('request body is required') + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('POST', url, params=query_params, data=data, files=form, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.UploadResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[components.Feedback]) + res.feedback = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + diff --git a/src/log10/feedbacktasks.py b/src/log10/feedbacktasks.py new file mode 100644 index 0000000..e0b76f8 --- /dev/null +++ b/src/log10/feedbacktasks.py @@ -0,0 +1,190 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
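+
+Usage sketch (illustrative, not generated; `sdk` is a configured root client,
+`feedback_tasks` as the accessor name is an assumption, and Task fields are
+elided):
+
+    from log10.models import components
+
+    tasks = sdk.feedback_tasks.list()
+    created = sdk.feedback_tasks.create(request=components.Task(...))
+    fetched = sdk.feedback_tasks.get(task_id='<task-id>')
+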
DO NOT EDIT.""" + +import requests as requests_http +from .sdkconfiguration import SDKConfiguration +from log10 import utils +from log10._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext, HookContext +from log10.models import components, errors, operations +from typing import List, Optional + +class FeedbackTasks: + r"""FeedbackTasks""" + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + + + def list(self) -> operations.ListFeedbackTasksResponse: + r"""List feedback tasks.""" + hook_ctx = HookContext(operation_id='listFeedbackTasks', oauth2_scopes=[], security_source=self.sdk_configuration.security) + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = base_url + '/api/v1/feedback_task' + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('GET', url, params=query_params, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.ListFeedbackTasksResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[List[components.Task]]) + res.tasks = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + + def create(self, request: Optional[components.Task] = None) -> operations.CreateFeedbackTaskResponse: + r"""Create a new task.""" + hook_ctx = HookContext(operation_id='createFeedbackTask', oauth2_scopes=[], security_source=self.sdk_configuration.security) + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = base_url + '/api/v1/feedback_task' + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + req_content_type, data, form = utils.serialize_request_body(request, 
Optional[components.Task], "request", False, True, 'json') + if req_content_type is not None and req_content_type not in ('multipart/form-data', 'multipart/mixed'): + headers['content-type'] = req_content_type + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('POST', url, params=query_params, data=data, files=form, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.CreateFeedbackTaskResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[components.Task]) + res.task = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + + def get(self, task_id: str) -> operations.GetFeedbackTaskResponse: + r"""Retrieves feedback task `taskId`.""" + hook_ctx = HookContext(operation_id='getFeedbackTask', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.GetFeedbackTaskRequest( + task_id=task_id, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/feedback_task/{taskId}', request) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('GET', url, params=query_params, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = 
self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.GetFeedbackTaskResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 200: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[components.Task]) + res.task = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + diff --git a/src/log10/models/__init__.py b/src/log10/models/__init__.py new file mode 100644 index 0000000..722bb99 --- /dev/null +++ b/src/log10/models/__init__.py @@ -0,0 +1,4 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + + +# package diff --git a/src/log10/models/components/__init__.py b/src/log10/models/components/__init__.py new file mode 100644 index 0000000..77e9459 --- /dev/null +++ b/src/log10/models/components/__init__.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .chatcompletionfunctioncalloption import * +from .chatcompletionfunctions import * +from .chatcompletionmessagetoolcall import * +from .chatcompletionnamedtoolchoice import * +from .chatcompletionrequestassistantmessage import * +from .chatcompletionrequestfunctionmessage import * +from .chatcompletionrequestmessage import * +from .chatcompletionrequestmessagecontentpart import * +from .chatcompletionrequestmessagecontentpartimage import * +from .chatcompletionrequestmessagecontentparttext import * +from .chatcompletionrequestsystemmessage import * +from .chatcompletionrequesttoolmessage import * +from .chatcompletionrequestusermessage import * +from .chatcompletionresponsemessage import * +from .chatcompletionstreamoptions import * +from .chatcompletiontokenlogprob import * +from .chatcompletiontool import * +from .chatcompletiontoolchoiceoption import * +from .completion import * +from .completionusage import * +from .createchatcompletionrequest import * +from .createchatcompletionresponse import * +from .feedback import * +from .functionobject import * +from .httpmetadata import * +from .security import * +from .session import * +from .task import * + +__all__ = 
["ChatCompletionFunctionCallOption","ChatCompletionFunctions","ChatCompletionMessageToolCall","ChatCompletionMessageToolCallType","ChatCompletionNamedToolChoice","ChatCompletionNamedToolChoiceFunction","ChatCompletionNamedToolChoiceType","ChatCompletionRequestAssistantMessage","ChatCompletionRequestAssistantMessageRole","ChatCompletionRequestFunctionMessage","ChatCompletionRequestFunctionMessageRole","ChatCompletionRequestMessage","ChatCompletionRequestMessageContentPart","ChatCompletionRequestMessageContentPartImage","ChatCompletionRequestMessageContentPartImageType","ChatCompletionRequestMessageContentPartText","ChatCompletionRequestSystemMessage","ChatCompletionRequestToolMessage","ChatCompletionRequestToolMessageRole","ChatCompletionRequestUserMessage","ChatCompletionRequestUserMessageRole","ChatCompletionResponseMessage","ChatCompletionResponseMessageFunctionCall","ChatCompletionResponseMessageRole","ChatCompletionStreamOptions","ChatCompletionTokenLogprob","ChatCompletionTool","ChatCompletionToolChoiceOption","ChatCompletionToolChoiceOption1","ChatCompletionToolType","Choices","Completion","CompletionTagsSelector","CompletionUsage","Content","CreateChatCompletionRequest","CreateChatCompletionRequestFunctionCall","CreateChatCompletionRequestType","CreateChatCompletionResponse","Detail","Feedback","FinishReason","Function","FunctionCall","FunctionObject","HTTPMetadata","ImageURL","JSONSchema","JSONValues","Kind","Logprobs","Model","Object","One","ResponseFormat","Role","Security","Session","Stacktrace","Status","Stop","Task","TopLogprobs","Two","Type"] diff --git a/src/log10/models/components/chatcompletionfunctioncalloption.py b/src/log10/models/components/chatcompletionfunctioncalloption.py new file mode 100644 index 0000000..51f269d --- /dev/null +++ b/src/log10/models/components/chatcompletionfunctioncalloption.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionFunctionCallOption: + r"""Specifying a particular function via `{\\"name\\": \\"my_function\\"}` forces the model to call that function.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to call.""" + + diff --git a/src/log10/models/components/chatcompletionfunctions.py b/src/log10/models/components/chatcompletionfunctions.py new file mode 100644 index 0000000..98894de --- /dev/null +++ b/src/log10/models/components/chatcompletionfunctions.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import Any, Dict, Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionFunctions: + r"""Deprecated class: This will be removed in a future release, please migrate away from it as soon as possible.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to be called. 
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.""" + description: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('description'), 'exclude': lambda f: f is None }}) + r"""A description of what the function does, used by the model to choose when and how to call the function.""" + parameters: Optional[Dict[str, Any]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('parameters'), 'exclude': lambda f: f is None }}) + r"""The parameters the function accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + diff --git a/src/log10/models/components/chatcompletionmessagetoolcall.py b/src/log10/models/components/chatcompletionmessagetoolcall.py new file mode 100644 index 0000000..bac53e0 --- /dev/null +++ b/src/log10/models/components/chatcompletionmessagetoolcall.py @@ -0,0 +1,37 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils + + +class ChatCompletionMessageToolCallType(str, Enum): + r"""The type of the tool. Currently, only `function` is supported.""" + FUNCTION = 'function' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Function: + r"""The function that the model called.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to call.""" + arguments: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('arguments') }}) + r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionMessageToolCall: + id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id') }}) + r"""The ID of the tool call.""" + type: ChatCompletionMessageToolCallType = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('type') }}) + r"""The type of the tool. Currently, only `function` is supported.""" + function: Function = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('function') }}) + r"""The function that the model called.""" + + diff --git a/src/log10/models/components/chatcompletionnamedtoolchoice.py b/src/log10/models/components/chatcompletionnamedtoolchoice.py new file mode 100644 index 0000000..d39faa2 --- /dev/null +++ b/src/log10/models/components/chatcompletionnamedtoolchoice.py @@ -0,0 +1,32 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils + + +class ChatCompletionNamedToolChoiceType(str, Enum): + r"""The type of the tool.
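# The tool-call models above are plain dataclasses_json dataclasses, so they can
# be constructed and serialized directly. A minimal sketch; the function name
# and arguments below are illustrative, not part of the SDK:
from log10.models.components import (
    ChatCompletionMessageToolCall,
    ChatCompletionMessageToolCallType,
    Function,
)

tool_call = ChatCompletionMessageToolCall(
    id='call_abc123',
    type=ChatCompletionMessageToolCallType.FUNCTION,
    function=Function(name='get_weather', arguments='{"city": "Oslo"}'),
)
print(tool_call.to_json())  # to_json()/to_dict() are added by @dataclass_json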
Currently, only `function` is supported.""" + FUNCTION = 'function' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionNamedToolChoiceFunction: + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to call.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionNamedToolChoice: + r"""Specifies a tool the model should use. Use to force the model to call a specific function.""" + type: ChatCompletionNamedToolChoiceType = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('type') }}) + r"""The type of the tool. Currently, only `function` is supported.""" + function: ChatCompletionNamedToolChoiceFunction = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('function') }}) + + diff --git a/src/log10/models/components/chatcompletionrequestassistantmessage.py b/src/log10/models/components/chatcompletionrequestassistantmessage.py new file mode 100644 index 0000000..9778af4 --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestassistantmessage.py @@ -0,0 +1,50 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .chatcompletionmessagetoolcall import ChatCompletionMessageToolCall +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import List, Optional + + +class ChatCompletionRequestAssistantMessageRole(str, Enum): + r"""The role of the messages author, in this case `assistant`.""" + ASSISTANT = 'assistant' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class FunctionCall: + r"""Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + + Deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. + """ + arguments: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('arguments') }}) + r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to call.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestAssistantMessage: + UNSET='__SPEAKEASY_UNSET__' + role: ChatCompletionRequestAssistantMessageRole = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('role') }}) + r"""The role of the messages author, in this case `assistant`.""" + content: Optional[str] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content'), 'exclude': lambda f: f is ChatCompletionRequestAssistantMessage.UNSET }}) + r"""The contents of the assistant message. 
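# ChatCompletionNamedToolChoice, defined above, is how a request forces one
# specific function to be called. A minimal sketch; the function name is
# illustrative:
from log10.models.components import (
    ChatCompletionNamedToolChoice,
    ChatCompletionNamedToolChoiceFunction,
    ChatCompletionNamedToolChoiceType,
)

forced_choice = ChatCompletionNamedToolChoice(
    type=ChatCompletionNamedToolChoiceType.FUNCTION,
    function=ChatCompletionNamedToolChoiceFunction(name='my_function'),
)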
Required unless `tool_calls` or `function_call` is specified.""" + name: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name'), 'exclude': lambda f: f is None }}) + r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role.""" + tool_calls: Optional[List[ChatCompletionMessageToolCall]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('tool_calls'), 'exclude': lambda f: f is None }}) + r"""The tool calls generated by the model, such as function calls.""" + function_call: Optional[FunctionCall] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('function_call'), 'exclude': lambda f: f is None }}) + r"""Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + + Deprecated field: This will be removed in a future release, please migrate away from it as soon as possible. + """ + + diff --git a/src/log10/models/components/chatcompletionrequestfunctionmessage.py b/src/log10/models/components/chatcompletionrequestfunctionmessage.py new file mode 100644 index 0000000..7eb6e1d --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestfunctionmessage.py @@ -0,0 +1,27 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import Optional + + +class ChatCompletionRequestFunctionMessageRole(str, Enum): + r"""The role of the messages author, in this case `function`.""" + FUNCTION = 'function' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestFunctionMessage: + r"""Deprecated class: This will be removed in a future release, please migrate away from it as soon as possible.""" + role: ChatCompletionRequestFunctionMessageRole = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('role') }}) + r"""The role of the messages author, in this case `function`.""" + content: Optional[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content') }}) + r"""The contents of the function message.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to call.""" + + diff --git a/src/log10/models/components/chatcompletionrequestmessage.py b/src/log10/models/components/chatcompletionrequestmessage.py new file mode 100644 index 0000000..bec7040 --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestmessage.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionrequestassistantmessage import ChatCompletionRequestAssistantMessage +from .chatcompletionrequestfunctionmessage import ChatCompletionRequestFunctionMessage +from .chatcompletionrequestsystemmessage import ChatCompletionRequestSystemMessage +from .chatcompletionrequesttoolmessage import ChatCompletionRequestToolMessage +from .chatcompletionrequestusermessage import ChatCompletionRequestUserMessage +from typing import Union + +ChatCompletionRequestMessage = Union[ChatCompletionRequestSystemMessage, ChatCompletionRequestUserMessage, ChatCompletionRequestAssistantMessage, ChatCompletionRequestToolMessage, ChatCompletionRequestFunctionMessage] diff --git a/src/log10/models/components/chatcompletionrequestmessagecontentpart.py b/src/log10/models/components/chatcompletionrequestmessagecontentpart.py new file mode 100644 index 0000000..91d3f64 --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestmessagecontentpart.py @@ -0,0 +1,8 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionrequestmessagecontentpartimage import ChatCompletionRequestMessageContentPartImage +from .chatcompletionrequestmessagecontentparttext import ChatCompletionRequestMessageContentPartText +from typing import Union + +ChatCompletionRequestMessageContentPart = Union[ChatCompletionRequestMessageContentPartText, ChatCompletionRequestMessageContentPartImage] diff --git a/src/log10/models/components/chatcompletionrequestmessagecontentpartimage.py b/src/log10/models/components/chatcompletionrequestmessagecontentpartimage.py new file mode 100644 index 0000000..6f273d3 --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestmessagecontentpartimage.py @@ -0,0 +1,41 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import Optional + + +class ChatCompletionRequestMessageContentPartImageType(str, Enum): + r"""The type of the content part.""" + IMAGE_URL = 'image_url' + + +class Detail(str, Enum): + r"""Specifies the detail level of the image. Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).""" + AUTO = 'auto' + LOW = 'low' + HIGH = 'high' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ImageURL: + url: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('url') }}) + r"""Either a URL of the image or the base64 encoded image data.""" + detail: Optional[Detail] = dataclasses.field(default=Detail.AUTO, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('detail'), 'exclude': lambda f: f is None }}) + r"""Specifies the detail level of the image. 
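# An image content part wraps an ImageURL plus an optional detail level, per the
# models above. A minimal sketch; the URL is illustrative:
from log10.models.components import (
    ChatCompletionRequestMessageContentPartImage,
    ChatCompletionRequestMessageContentPartImageType,
    Detail,
    ImageURL,
)

image_part = ChatCompletionRequestMessageContentPartImage(
    type=ChatCompletionRequestMessageContentPartImageType.IMAGE_URL,
    image_url=ImageURL(url='https://example.com/photo.png', detail=Detail.LOW),
)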
Learn more in the [Vision guide](/docs/guides/vision/low-or-high-fidelity-image-understanding).""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestMessageContentPartImage: + type: ChatCompletionRequestMessageContentPartImageType = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('type') }}) + r"""The type of the content part.""" + image_url: ImageURL = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('image_url') }}) + + diff --git a/src/log10/models/components/chatcompletionrequestmessagecontentparttext.py b/src/log10/models/components/chatcompletionrequestmessagecontentparttext.py new file mode 100644 index 0000000..4d7761b --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestmessagecontentparttext.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils + + +class Type(str, Enum): + r"""The type of the content part.""" + TEXT = 'text' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestMessageContentPartText: + type: Type = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('type') }}) + r"""The type of the content part.""" + text: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('text') }}) + r"""The text content.""" + + diff --git a/src/log10/models/components/chatcompletionrequestsystemmessage.py b/src/log10/models/components/chatcompletionrequestsystemmessage.py new file mode 100644 index 0000000..2366b44 --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestsystemmessage.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import Optional + + +class Role(str, Enum): + r"""The role of the messages author, in this case `system`.""" + SYSTEM = 'system' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestSystemMessage: + content: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content') }}) + r"""The contents of the system message.""" + role: Role = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('role') }}) + r"""The role of the messages author, in this case `system`.""" + name: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name'), 'exclude': lambda f: f is None }}) + r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role.""" + + diff --git a/src/log10/models/components/chatcompletionrequesttoolmessage.py b/src/log10/models/components/chatcompletionrequesttoolmessage.py new file mode 100644 index 0000000..728dcf3 --- /dev/null +++ b/src/log10/models/components/chatcompletionrequesttoolmessage.py @@ -0,0 +1,25 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils + + +class ChatCompletionRequestToolMessageRole(str, Enum): + r"""The role of the messages author, in this case `tool`.""" + TOOL = 'tool' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestToolMessage: + role: ChatCompletionRequestToolMessageRole = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('role') }}) + r"""The role of the messages author, in this case `tool`.""" + content: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content') }}) + r"""The contents of the tool message.""" + tool_call_id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('tool_call_id') }}) + r"""Tool call that this message is responding to.""" + + diff --git a/src/log10/models/components/chatcompletionrequestusermessage.py b/src/log10/models/components/chatcompletionrequestusermessage.py new file mode 100644 index 0000000..a4858af --- /dev/null +++ b/src/log10/models/components/chatcompletionrequestusermessage.py @@ -0,0 +1,29 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .chatcompletionrequestmessagecontentpart import ChatCompletionRequestMessageContentPart +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import List, Optional, Union + +Content = Union[str, List[ChatCompletionRequestMessageContentPart]] + + +class ChatCompletionRequestUserMessageRole(str, Enum): + r"""The role of the messages author, in this case `user`.""" + USER = 'user' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionRequestUserMessage: + content: Content = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content') }}) + r"""The contents of the user message.""" + role: ChatCompletionRequestUserMessageRole = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('role') }}) + r"""The role of the messages author, in this case `user`.""" + name: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name'), 'exclude': lambda f: f is None }}) + r"""An optional name for the participant. Provides the model information to differentiate between participants of the same role.""" + + diff --git a/src/log10/models/components/chatcompletionresponsemessage.py b/src/log10/models/components/chatcompletionresponsemessage.py new file mode 100644 index 0000000..3c9c439 --- /dev/null +++ b/src/log10/models/components/chatcompletionresponsemessage.py @@ -0,0 +1,48 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
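# Because Content is Union[str, List[ChatCompletionRequestMessageContentPart]],
# a user message accepts either a plain string or a list of parts such as the
# image_part sketched earlier. A minimal sketch:
from log10.models.components import (
    ChatCompletionRequestUserMessage,
    ChatCompletionRequestUserMessageRole,
)

text_message = ChatCompletionRequestUserMessage(
    content='What is in this image?',
    role=ChatCompletionRequestUserMessageRole.USER,
)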
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .chatcompletionmessagetoolcall import ChatCompletionMessageToolCall +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import List, Optional + + +class ChatCompletionResponseMessageRole(str, Enum): + r"""The role of the author of this message.""" + ASSISTANT = 'assistant' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionResponseMessageFunctionCall: + r"""Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + + Deprecated class: This will be removed in a future release, please migrate away from it as soon as possible. + """ + arguments: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('arguments') }}) + r"""The arguments to call the function with, as generated by the model in JSON format. Note that the model does not always generate valid JSON, and may hallucinate parameters not defined by your function schema. Validate the arguments in your code before calling your function.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to call.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionResponseMessage: + r"""A chat completion message generated by the model.""" + content: Optional[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content') }}) + r"""The contents of the message.""" + role: ChatCompletionResponseMessageRole = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('role') }}) + r"""The role of the author of this message.""" + tool_calls: Optional[List[ChatCompletionMessageToolCall]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('tool_calls'), 'exclude': lambda f: f is None }}) + r"""The tool calls generated by the model, such as function calls.""" + function_call: Optional[ChatCompletionResponseMessageFunctionCall] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('function_call'), 'exclude': lambda f: f is None }}) + r"""Deprecated and replaced by `tool_calls`. The name and arguments of a function that should be called, as generated by the model. + + Deprecated field: This will be removed in a future release, please migrate away from it as soon as possible. + """ + + diff --git a/src/log10/models/components/chatcompletionstreamoptions.py b/src/log10/models/components/chatcompletionstreamoptions.py new file mode 100644 index 0000000..a740d48 --- /dev/null +++ b/src/log10/models/components/chatcompletionstreamoptions.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionStreamOptions: + r"""Options for streaming response. 
Only set this when you set `stream: true`.""" + include_usage: Optional[bool] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('include_usage'), 'exclude': lambda f: f is None }}) + r"""If set, an additional chunk will be streamed before the `data: [DONE]` message. The `usage` field on this chunk shows the token usage statistics for the entire request, and the `choices` field will always be an empty array. All other chunks will also include a `usage` field, but with a null value.""" + + diff --git a/src/log10/models/components/chatcompletiontokenlogprob.py b/src/log10/models/components/chatcompletiontokenlogprob.py new file mode 100644 index 0000000..57d17e8 --- /dev/null +++ b/src/log10/models/components/chatcompletiontokenlogprob.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import List, Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class TopLogprobs: + token: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('token') }}) + r"""The token.""" + logprob: float = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('logprob') }}) + r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.""" + bytes: Optional[List[int]] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('bytes') }}) + r"""A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionTokenLogprob: + token: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('token') }}) + r"""The token.""" + logprob: float = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('logprob') }}) + r"""The log probability of this token, if it is within the top 20 most likely tokens. Otherwise, the value `-9999.0` is used to signify that the token is very unlikely.""" + bytes: Optional[List[int]] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('bytes') }}) + r"""A list of integers representing the UTF-8 bytes representation of the token. Useful in instances where characters are represented by multiple tokens and their byte representations must be combined to generate the correct text representation. Can be `null` if there is no bytes representation for the token.""" + top_logprobs: List[TopLogprobs] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('top_logprobs') }}) + r"""List of the most likely tokens and their log probability, at this token position. 
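# As the `bytes` docstrings above note, byte values can be concatenated across
# tokens to recover exact text when a character spans token boundaries. A
# sketch; the helper name is ours, not part of the SDK:
def logprob_tokens_to_text(token_logprobs):
    """Rebuild text from a list of ChatCompletionTokenLogprob values."""
    buf = bytearray()
    for tok in token_logprobs:
        if tok.bytes is not None:  # bytes can be null for some tokens
            buf.extend(tok.bytes)
    return buf.decode('utf-8', errors='replace')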
In rare cases, there may be fewer than the number of requested `top_logprobs` returned.""" + + diff --git a/src/log10/models/components/chatcompletiontool.py b/src/log10/models/components/chatcompletiontool.py new file mode 100644 index 0000000..9d0a579 --- /dev/null +++ b/src/log10/models/components/chatcompletiontool.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .functionobject import FunctionObject +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils + + +class ChatCompletionToolType(str, Enum): + r"""The type of the tool. Currently, only `function` is supported.""" + FUNCTION = 'function' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ChatCompletionTool: + type: ChatCompletionToolType = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('type') }}) + r"""The type of the tool. Currently, only `function` is supported.""" + function: FunctionObject = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('function') }}) + + diff --git a/src/log10/models/components/chatcompletiontoolchoiceoption.py b/src/log10/models/components/chatcompletiontoolchoiceoption.py new file mode 100644 index 0000000..4890815 --- /dev/null +++ b/src/log10/models/components/chatcompletiontoolchoiceoption.py @@ -0,0 +1,15 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +from .chatcompletionnamedtoolchoice import ChatCompletionNamedToolChoice +from enum import Enum +from typing import Union + + +class ChatCompletionToolChoiceOption1(str, Enum): + r"""`none` means the model will not call any tool and instead generates a message. `auto` means the model can pick between generating a message or calling one or more tools. `required` means the model must call one or more tools.""" + NONE = 'none' + AUTO = 'auto' + REQUIRED = 'required' + +ChatCompletionToolChoiceOption = Union['ChatCompletionToolChoiceOption1', ChatCompletionNamedToolChoice] diff --git a/src/log10/models/components/completion.py b/src/log10/models/components/completion.py new file mode 100644 index 0000000..0e17fa2 --- /dev/null +++ b/src/log10/models/components/completion.py @@ -0,0 +1,68 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .createchatcompletionrequest import CreateChatCompletionRequest +from .createchatcompletionresponse import CreateChatCompletionResponse +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import List, Optional + + +class Kind(str, Enum): + r"""The kind of completion i.e. 
chat messages or prompt""" + CHAT = 'chat' + PROMPT = 'prompt' + + +class Status(str, Enum): + r"""The status of this completion.""" + STARTED = 'started' + FINISHED = 'finished' + FAILED = 'failed' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Stacktrace: + file: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('file') }}) + r"""The file associated with this stacktrace.""" + line: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('line') }}) + r"""The line associated with this stacktrace.""" + lineno: float = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('lineno') }}) + r"""The line number associated with this stacktrace.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The function or module associated with this stacktrace.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Completion: + organization_id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('organization_id') }}) + r"""The unique identifier for the organization.""" + id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id'), 'exclude': lambda f: f is None }}) + r"""The unique identifier for this task.""" + kind: Optional[Kind] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('kind'), 'exclude': lambda f: f is None }}) + r"""The kind of completion i.e. chat messages or prompt""" + status: Optional[Status] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('status'), 'exclude': lambda f: f is None }}) + r"""The status of this completion.""" + tags: Optional[List[str]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('tags'), 'exclude': lambda f: f is None }}) + r"""The tags for this completion.""" + request: Optional[CreateChatCompletionRequest] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('request'), 'exclude': lambda f: f is None }}) + response: Optional[CreateChatCompletionResponse] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('response'), 'exclude': lambda f: f is None }}) + r"""Represents a chat completion response returned by model, based on the provided input.""" + stacktrace: Optional[List[Stacktrace]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('stacktrace'), 'exclude': lambda f: f is None }}) + r"""The stacktrace for this completion.""" + session_id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('session_id'), 'exclude': lambda f: f is None }}) + r"""The session id for this completion.""" + duration: Optional[float] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('duration'), 'exclude': lambda f: f is None }}) + r"""The duration of this completion in seconds.""" + failure_kind: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('failure_kind'), 'exclude': lambda f: f is None }}) + r"""The failure kind of this completion.""" + 
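# Illustrative sketch of a logged completion record built from the Completion
# fields in this file (all values below are made up):
#   Completion(organization_id='org_123', kind=Kind.CHAT,
#              status=Status.FINISHED, tags=['prod'], duration=1.7,
#              session_id='session_456')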
failure_reason: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('failure_reason'), 'exclude': lambda f: f is None }}) + r"""The failure reason of this completion.""" + + diff --git a/src/log10/models/components/completionusage.py b/src/log10/models/components/completionusage.py new file mode 100644 index 0000000..014ab52 --- /dev/null +++ b/src/log10/models/components/completionusage.py @@ -0,0 +1,20 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CompletionUsage: + r"""Usage statistics for the completion request.""" + completion_tokens: int = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completion_tokens') }}) + r"""Number of tokens in the generated completion.""" + prompt_tokens: int = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('prompt_tokens') }}) + r"""Number of tokens in the prompt.""" + total_tokens: int = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('total_tokens') }}) + r"""Total number of tokens used in the request (prompt + completion).""" + + diff --git a/src/log10/models/components/createchatcompletionrequest.py b/src/log10/models/components/createchatcompletionrequest.py new file mode 100644 index 0000000..c58aef4 --- /dev/null +++ b/src/log10/models/components/createchatcompletionrequest.py @@ -0,0 +1,167 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .chatcompletionfunctioncalloption import ChatCompletionFunctionCallOption +from .chatcompletionfunctions import ChatCompletionFunctions +from .chatcompletionrequestmessage import ChatCompletionRequestMessage +from .chatcompletionstreamoptions import ChatCompletionStreamOptions +from .chatcompletiontool import ChatCompletionTool +from .chatcompletiontoolchoiceoption import ChatCompletionToolChoiceOption +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import Dict, List, Optional, Union + + +class Two(str, Enum): + GPT_4_TURBO = 'gpt-4-turbo' + GPT_4_TURBO_2024_04_09 = 'gpt-4-turbo-2024-04-09' + GPT_4_0125_PREVIEW = 'gpt-4-0125-preview' + GPT_4_TURBO_PREVIEW = 'gpt-4-turbo-preview' + GPT_4_1106_PREVIEW = 'gpt-4-1106-preview' + GPT_4_VISION_PREVIEW = 'gpt-4-vision-preview' + GPT_4 = 'gpt-4' + GPT_4_0314 = 'gpt-4-0314' + GPT_4_0613 = 'gpt-4-0613' + GPT_4_32K = 'gpt-4-32k' + GPT_4_32K_0314 = 'gpt-4-32k-0314' + GPT_4_32K_0613 = 'gpt-4-32k-0613' + GPT_3_5_TURBO = 'gpt-3.5-turbo' + GPT_3_5_TURBO_16K = 'gpt-3.5-turbo-16k' + GPT_3_5_TURBO_0301 = 'gpt-3.5-turbo-0301' + GPT_3_5_TURBO_0613 = 'gpt-3.5-turbo-0613' + GPT_3_5_TURBO_1106 = 'gpt-3.5-turbo-1106' + GPT_3_5_TURBO_0125 = 'gpt-3.5-turbo-0125' + GPT_3_5_TURBO_16K_0613 = 'gpt-3.5-turbo-16k-0613' + +Model = Union[str, 'Two'] + + +class CreateChatCompletionRequestType(str, Enum): + r"""Must be one of `text` or `json_object`.""" + TEXT = 'text' + JSON_OBJECT = 'json_object' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ResponseFormat: + r"""An object specifying the format that the model must output. 
Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + """ + type: Optional[CreateChatCompletionRequestType] = dataclasses.field(default=CreateChatCompletionRequestType.TEXT, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('type'), 'exclude': lambda f: f is None }}) + r"""Must be one of `text` or `json_object`.""" + + + +Stop = Union[str, List[str]] + + +class One(str, Enum): + r"""`none` means the model will not call a function and instead generates a message. `auto` means the model can pick between generating a message or calling a function.""" + NONE = 'none' + AUTO = 'auto' + +CreateChatCompletionRequestFunctionCall = Union['One', ChatCompletionFunctionCallOption] + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CreateChatCompletionRequest: + UNSET='__SPEAKEASY_UNSET__' + messages: List[ChatCompletionRequestMessage] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('messages') }}) + r"""A list of messages comprising the conversation so far. [Example Python code](https://cookbook.openai.com/examples/how_to_format_inputs_to_chatgpt_models).""" + model: Model = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('model') }}) + r"""ID of the model to use. See the [model endpoint compatibility](/docs/models/model-endpoint-compatibility) table for details on which models work with the Chat API.""" + frequency_penalty: Optional[float] = dataclasses.field(default=0, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('frequency_penalty'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + """ + logit_bias: Optional[Dict[str, int]] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('logit_bias'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""Modify the likelihood of specified tokens appearing in the completion. + + Accepts a JSON object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token. 
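# Enabling JSON mode per the ResponseFormat docstring above; remember to also
# instruct the model to emit JSON in a system or user message. A minimal sketch:
from log10.models.components import CreateChatCompletionRequestType, ResponseFormat

json_mode = ResponseFormat(type=CreateChatCompletionRequestType.JSON_OBJECT)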
+ """ + logprobs: Optional[bool] = dataclasses.field(default=False, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('logprobs'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""Whether to return log probabilities of the output tokens or not. If true, returns the log probabilities of each output token returned in the `content` of `message`.""" + top_logprobs: Optional[int] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('top_logprobs'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""An integer between 0 and 20 specifying the number of most likely tokens to return at each token position, each with an associated log probability. `logprobs` must be set to `true` if this parameter is used.""" + max_tokens: Optional[int] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('max_tokens'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""The maximum number of [tokens](/tokenizer) that can be generated in the chat completion. + + The total length of input tokens and generated tokens is limited by the model's context length. [Example Python code](https://cookbook.openai.com/examples/how_to_count_tokens_with_tiktoken) for counting tokens. + """ + n: Optional[int] = dataclasses.field(default=1, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('n'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""How many chat completion choices to generate for each input message. Note that you will be charged based on the number of generated tokens across all of the choices. Keep `n` as `1` to minimize costs.""" + presence_penalty: Optional[float] = dataclasses.field(default=0, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('presence_penalty'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. + + [See more information about frequency and presence penalties.](/docs/guides/text-generation/parameter-details) + """ + response_format: Optional[ResponseFormat] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('response_format'), 'exclude': lambda f: f is None }}) + r"""An object specifying the format that the model must output. Compatible with [GPT-4 Turbo](/docs/models/gpt-4-and-gpt-4-turbo) and all GPT-3.5 Turbo models newer than `gpt-3.5-turbo-1106`. + + Setting to `{ \"type\": \"json_object\" }` enables JSON mode, which guarantees the message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to produce JSON yourself via a system or user message. Without this, the model may generate an unending stream of whitespace until the generation reaches the token limit, resulting in a long-running and seemingly \"stuck\" request. Also note that the message content may be partially cut off if `finish_reason=\"length\"`, which indicates the generation exceeded `max_tokens` or the conversation exceeded the max context length. + """ + seed: Optional[int] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('seed'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""This feature is in Beta. 
+ If specified, our system will make a best effort to sample deterministically, such that repeated requests with the same `seed` and parameters should return the same result. + Determinism is not guaranteed, and you should refer to the `system_fingerprint` response parameter to monitor changes in the backend. + """ + stop: Optional[Stop] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('stop'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""Up to 4 sequences where the API will stop generating further tokens.""" + stream: Optional[bool] = dataclasses.field(default=False, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('stream'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message. [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions).""" + stream_options: Optional[ChatCompletionStreamOptions] = dataclasses.field(default=UNSET, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('stream_options'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""Options for streaming response. Only set this when you set `stream: true`.""" + temperature: Optional[float] = dataclasses.field(default=1, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('temperature'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. + + We generally recommend altering this or `top_p` but not both. + """ + top_p: Optional[float] = dataclasses.field(default=1, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('top_p'), 'exclude': lambda f: f is CreateChatCompletionRequest.UNSET }}) + r"""An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + tools: Optional[List[ChatCompletionTool]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('tools'), 'exclude': lambda f: f is None }}) + r"""A list of tools the model may call. Currently, only functions are supported as a tool. Use this to provide a list of functions the model may generate JSON inputs for. A max of 128 functions are supported.""" + tool_choice: Optional[ChatCompletionToolChoiceOption] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('tool_choice'), 'exclude': lambda f: f is None }}) + r"""Controls which (if any) tool is called by the model. + `none` means the model will not call any tool and instead generates a message. + `auto` means the model can pick between generating a message or calling one or more tools. + `required` means the model must call one or more tools. 
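# Putting the request model together: one user message plus one tool the model
# may call, with `auto` tool choice. A minimal sketch; the tool name and JSON
# Schema are illustrative:
from log10.models.components import (
    ChatCompletionRequestUserMessage,
    ChatCompletionRequestUserMessageRole,
    ChatCompletionTool,
    ChatCompletionToolChoiceOption1,
    ChatCompletionToolType,
    CreateChatCompletionRequest,
    FunctionObject,
)

request = CreateChatCompletionRequest(
    messages=[ChatCompletionRequestUserMessage(
        content='What is the weather in Oslo?',
        role=ChatCompletionRequestUserMessageRole.USER,
    )],
    model='gpt-4-turbo',  # Model also accepts any plain string
    tools=[ChatCompletionTool(
        type=ChatCompletionToolType.FUNCTION,
        function=FunctionObject(
            name='get_weather',
            parameters={'type': 'object', 'properties': {'city': {'type': 'string'}}},
        ),
    )],
    tool_choice=ChatCompletionToolChoiceOption1.AUTO,
)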
+ Specifying a particular tool via `{\"type\": \"function\", \"function\": {\"name\": \"my_function\"}}` forces the model to call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools are present. + """ + user: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('user'), 'exclude': lambda f: f is None }}) + r"""A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).""" + function_call: Optional[CreateChatCompletionRequestFunctionCall] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('function_call'), 'exclude': lambda f: f is None }}) + r"""Deprecated in favor of `tool_choice`. + + Controls which (if any) function is called by the model. + `none` means the model will not call a function and instead generates a message. + `auto` means the model can pick between generating a message or calling a function. + Specifying a particular function via `{\"name\": \"my_function\"}` forces the model to call that function. + + `none` is the default when no functions are present. `auto` is the default if functions are present. + + Deprecated field: This will be removed in a future release, please migrate away from it as soon as possible. + """ + functions: Optional[List[ChatCompletionFunctions]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('functions'), 'exclude': lambda f: f is None }}) + r"""Deprecated in favor of `tools`. + + A list of functions the model may generate JSON inputs for. + + Deprecated field: This will be removed in a future release, please migrate away from it as soon as possible. + """ + + diff --git a/src/log10/models/components/createchatcompletionresponse.py b/src/log10/models/components/createchatcompletionresponse.py new file mode 100644 index 0000000..e30183a --- /dev/null +++ b/src/log10/models/components/createchatcompletionresponse.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from .chatcompletionresponsemessage import ChatCompletionResponseMessage +from .chatcompletiontokenlogprob import ChatCompletionTokenLogprob +from .completionusage import CompletionUsage +from dataclasses_json import Undefined, dataclass_json +from enum import Enum +from log10 import utils +from typing import List, Optional + + +class FinishReason(str, Enum): + r"""The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. 
+ """ + STOP = 'stop' + LENGTH = 'length' + TOOL_CALLS = 'tool_calls' + CONTENT_FILTER = 'content_filter' + FUNCTION_CALL = 'function_call' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Logprobs: + r"""Log probability information for the choice.""" + content: Optional[List[ChatCompletionTokenLogprob]] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('content') }}) + r"""A list of message content tokens with log probability information.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Choices: + finish_reason: FinishReason = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('finish_reason') }}) + r"""The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence, + `length` if the maximum number of tokens specified in the request was reached, + `content_filter` if content was omitted due to a flag from our content filters, + `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function. + """ + index: int = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('index') }}) + r"""The index of the choice in the list of choices.""" + message: ChatCompletionResponseMessage = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('message') }}) + r"""A chat completion message generated by the model.""" + logprobs: Optional[Logprobs] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('logprobs') }}) + r"""Log probability information for the choice.""" + + + + +class Object(str, Enum): + r"""The object type, which is always `chat.completion`.""" + CHAT_COMPLETION = 'chat.completion' + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CreateChatCompletionResponse: + r"""Represents a chat completion response returned by model, based on the provided input.""" + id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id') }}) + r"""A unique identifier for the chat completion.""" + choices: List[Choices] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('choices') }}) + r"""A list of chat completion choices. Can be more than one if `n` is greater than 1.""" + created: int = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('created') }}) + r"""The Unix timestamp (in seconds) of when the chat completion was created.""" + model: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('model') }}) + r"""The model used for the chat completion.""" + object: Object = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('object') }}) + r"""The object type, which is always `chat.completion`.""" + system_fingerprint: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('system_fingerprint'), 'exclude': lambda f: f is None }}) + r"""This fingerprint represents the backend configuration that the model runs with. + + Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism. 
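# Reading a response: branch on finish_reason before trusting message.content,
# since tool calls arrive with empty content. A sketch; the helper name is
# ours, not part of the SDK:
from log10.models.components import FinishReason

def first_choice_output(resp):
    """Return text content or tool calls from a CreateChatCompletionResponse."""
    choice = resp.choices[0]
    if choice.finish_reason == FinishReason.TOOL_CALLS:
        return choice.message.tool_calls
    return choice.message.content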
+ """ + usage: Optional[CompletionUsage] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('usage'), 'exclude': lambda f: f is None }}) + r"""Usage statistics for the completion request.""" + + diff --git a/src/log10/models/components/feedback.py b/src/log10/models/components/feedback.py new file mode 100644 index 0000000..5afea38 --- /dev/null +++ b/src/log10/models/components/feedback.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import List, Optional + + +@dataclasses.dataclass +class JSONValues: + r"""The values of the feedback. Must be valid JSON according to the task schema.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Feedback: + task_id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('task_id') }}) + r"""The unique identifier for the task associated with this feedback.""" + json_values: JSONValues = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('json_values') }}) + r"""The values of the feedback. Must be valid JSON according to the task schema.""" + matched_completion_ids: List[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('matched_completion_ids') }}) + r"""The matched completion ids associated with this feedback.""" + comment: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('comment') }}) + r"""The comment associated with this feedback.""" + id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id'), 'exclude': lambda f: f is None }}) + r"""The unique identifier for this feedback.""" + created_at_ms: Optional[float] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('created_at_ms'), 'exclude': lambda f: f is None }}) + r"""The epoch this schema was created.""" + completions_summary: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completions_summary'), 'exclude': lambda f: f is None }}) + + diff --git a/src/log10/models/components/functionobject.py b/src/log10/models/components/functionobject.py new file mode 100644 index 0000000..4cdfcaa --- /dev/null +++ b/src/log10/models/components/functionobject.py @@ -0,0 +1,23 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import Any, Dict, Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class FunctionObject: + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the function to be called. 
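# Feedback ties graded values to completions by id, as the field docstrings
# above describe. A minimal sketch; the ids and comment are illustrative, and
# json_values must satisfy the associated task's JSON schema:
from log10.models.components import Feedback, JSONValues

feedback = Feedback(
    task_id='task_123',
    json_values=JSONValues(),
    matched_completion_ids=['completion_456'],
    comment='Accurate and concise.',
)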
Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length of 64.""" + description: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('description'), 'exclude': lambda f: f is None }}) + r"""A description of what the function does, used by the model to choose when and how to call the function.""" + parameters: Optional[Dict[str, Any]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('parameters'), 'exclude': lambda f: f is None }}) + r"""The parameters the function accepts, described as a JSON Schema object. See the [guide](/docs/guides/text-generation/function-calling) for examples, and the [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + diff --git a/src/log10/models/components/httpmetadata.py b/src/log10/models/components/httpmetadata.py new file mode 100644 index 0000000..5a87cb4 --- /dev/null +++ b/src/log10/models/components/httpmetadata.py @@ -0,0 +1,17 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +import requests as requests_http +from dataclasses_json import Undefined, dataclass_json + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class HTTPMetadata: + response: requests_http.Response = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + r"""Raw HTTP response; suitable for custom response parsing""" + request: requests_http.Request = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + r"""Raw HTTP request; suitable for debugging""" + + diff --git a/src/log10/models/components/security.py b/src/log10/models/components/security.py new file mode 100644 index 0000000..652420a --- /dev/null +++ b/src/log10/models/components/security.py @@ -0,0 +1,12 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from typing import Optional + + +@dataclasses.dataclass +class Security: + log10_token: Optional[str] = dataclasses.field(default=None, metadata={'security': { 'scheme': True, 'type': 'apiKey', 'sub_type': 'header', 'field_name': 'X-Log10-Token' }}) + + diff --git a/src/log10/models/components/session.py b/src/log10/models/components/session.py new file mode 100644 index 0000000..3a35eb2 --- /dev/null +++ b/src/log10/models/components/session.py @@ -0,0 +1,16 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Session: + id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id'), 'exclude': lambda f: f is None }}) + r"""The unique identifier for this session.""" + + diff --git a/src/log10/models/components/task.py b/src/log10/models/components/task.py new file mode 100644 index 0000000..be07761 --- /dev/null +++ b/src/log10/models/components/task.py @@ -0,0 +1,40 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev).
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import Optional + + +@dataclasses.dataclass +class JSONSchema: + r"""The schema of the task. Must be valid JSON Schema.""" + + + + +@dataclasses.dataclass +class CompletionTagsSelector: + r"""The completion tag matching for this task, i.e. which completions are surfaced as needing feedback.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Task: + json_schema: JSONSchema = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('json_schema') }}) + r"""The schema of the task. Must be valid JSON Schema.""" + name: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('name') }}) + r"""The name of the task.""" + instruction: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('instruction') }}) + r"""The instructions for this task.""" + completion_tags_selector: CompletionTagsSelector = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completion_tags_selector') }}) + r"""The completion tag matching for this task, i.e. which completions are surfaced as needing feedback.""" + id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id'), 'exclude': lambda f: f is None }}) + r"""The unique identifier for this task.""" + created_at_ms: Optional[float] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('created_at_ms'), 'exclude': lambda f: f is None }}) + r"""The epoch this schema was created.""" + + diff --git a/src/log10/models/errors/__init__.py b/src/log10/models/errors/__init__.py new file mode 100644 index 0000000..88d0916 --- /dev/null +++ b/src/log10/models/errors/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .sdkerror import * + +__all__ = ["SDKError"] diff --git a/src/log10/models/errors/sdkerror.py b/src/log10/models/errors/sdkerror.py new file mode 100644 index 0000000..6bb02bb --- /dev/null +++ b/src/log10/models/errors/sdkerror.py @@ -0,0 +1,24 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +import requests as requests_http + + +class SDKError(Exception): + """Represents an error returned by the API.""" + message: str + status_code: int + body: str + raw_response: requests_http.Response + + def __init__(self, message: str, status_code: int, body: str, raw_response: requests_http.Response): + self.message = message + self.status_code = status_code + self.body = body + self.raw_response = raw_response + + def __str__(self): + body = '' + if len(self.body) > 0: + body = f'\n{self.body}' + + return f'{self.message}: Status {self.status_code}{body}' diff --git a/src/log10/models/internal/__init__.py b/src/log10/models/internal/__init__.py new file mode 100644 index 0000000..51f9e9d --- /dev/null +++ b/src/log10/models/internal/__init__.py @@ -0,0 +1,5 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .globals import * + +__all__ = ["Globals"] diff --git a/src/log10/models/internal/globals.py b/src/log10/models/internal/globals.py new file mode 100644 index 0000000..094e24d --- /dev/null +++ b/src/log10/models/internal/globals.py @@ -0,0 +1,11 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev).
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses + + +@dataclasses.dataclass +class Globals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + diff --git a/src/log10/models/operations/__init__.py b/src/log10/models/operations/__init__.py new file mode 100644 index 0000000..ef1743c --- /dev/null +++ b/src/log10/models/operations/__init__.py @@ -0,0 +1,14 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .create import * +from .createfeedbacktask import * +from .createsession import * +from .get import * +from .getfeedbacktask import * +from .list import * +from .listfeedbacktasks import * +from .listungraded import * +from .update import * +from .upload import * + +__all__ = ["CreateFeedbackTaskResponse","CreateGlobals","CreateRequest","CreateResponse","CreateSessionGlobals","CreateSessionRequest","CreateSessionResponse","CreateSessionResponseBody","GetFeedbackTaskRequest","GetFeedbackTaskResponse","GetGlobals","GetRequest","GetResponse","JSONValues","ListFeedbackTasksResponse","ListGlobals","ListRequest","ListRequestBody","ListResponse","ListResponseBody","ListUngradedGlobals","ListUngradedRequest","ListUngradedResponse","ListUngradedResponseBody","One","RequestBodyJSONValues","Two","UpdateGlobals","UpdateRequest","UpdateResponse","UploadGlobals","UploadRequest","UploadRequestBody","UploadResponse"] diff --git a/src/log10/models/operations/create.py b/src/log10/models/operations/create.py new file mode 100644 index 0000000..59cd61b --- /dev/null +++ b/src/log10/models/operations/create.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import completion as components_completion +from ...models.components import httpmetadata as components_httpmetadata +from dataclasses_json import Undefined, dataclass_json +from typing import Any, Optional + + +@dataclasses.dataclass +class CreateGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclasses.dataclass +class CreateRequest: + completion: components_completion.Completion = dataclasses.field(metadata={'request': { 'media_type': 'application/json' }}) + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CreateResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + any: Optional[Any] = dataclasses.field(default=None) + r"""Created""" + completion: Optional[components_completion.Completion] = dataclasses.field(default=None) + r"""Created""" + + diff --git a/src/log10/models/operations/createfeedbacktask.py b/src/log10/models/operations/createfeedbacktask.py new file mode 100644 index 0000000..8c6e2ac --- /dev/null +++ b/src/log10/models/operations/createfeedbacktask.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import httpmetadata as components_httpmetadata +from ...models.components import task as components_task +from dataclasses_json import Undefined, dataclass_json +from typing import Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CreateFeedbackTaskResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + task: Optional[components_task.Task] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/createsession.py b/src/log10/models/operations/createsession.py new file mode 100644 index 0000000..f50c25a --- /dev/null +++ b/src/log10/models/operations/createsession.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import httpmetadata as components_httpmetadata +from ...models.components import session as components_session +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import Optional + + +@dataclasses.dataclass +class CreateSessionGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclasses.dataclass +class CreateSessionRequest: + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CreateSessionResponseBody: + r"""Created""" + session: Optional[components_session.Session] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('session'), 'exclude': lambda f: f is None }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class CreateSessionResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + object: Optional[CreateSessionResponseBody] = dataclasses.field(default=None) + r"""Created""" + + diff --git a/src/log10/models/operations/get.py b/src/log10/models/operations/get.py new file mode 100644 index 0000000..b50833b --- /dev/null +++ b/src/log10/models/operations/get.py @@ -0,0 +1,34 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import feedback as components_feedback +from ...models.components import httpmetadata as components_httpmetadata +from dataclasses_json import Undefined, dataclass_json +from typing import Optional + + +@dataclasses.dataclass +class GetGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclasses.dataclass +class GetRequest: + feedback_id: str = dataclasses.field(metadata={'path_param': { 'field_name': 'feedbackId', 'style': 'simple', 'explode': False }}) + r"""The feedback id to fetch.""" + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class GetResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + feedback: Optional[components_feedback.Feedback] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/getfeedbacktask.py b/src/log10/models/operations/getfeedbacktask.py new file mode 100644 index 0000000..1bee914 --- /dev/null +++ b/src/log10/models/operations/getfeedbacktask.py @@ -0,0 +1,26 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import httpmetadata as components_httpmetadata +from ...models.components import task as components_task +from dataclasses_json import Undefined, dataclass_json +from typing import Optional + + +@dataclasses.dataclass +class GetFeedbackTaskRequest: + task_id: str = dataclasses.field(metadata={'path_param': { 'field_name': 'taskId', 'style': 'simple', 'explode': False }}) + r"""The task id to fetch.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class GetFeedbackTaskResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + task: Optional[components_task.Task] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/list.py b/src/log10/models/operations/list.py new file mode 100644 index 0000000..1273d0d --- /dev/null +++ b/src/log10/models/operations/list.py @@ -0,0 +1,58 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import feedback as components_feedback +from ...models.components import httpmetadata as components_httpmetadata +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import List, Optional + + +@dataclasses.dataclass +class ListGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ListRequestBody: + offset: Optional[int] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('offset'), 'exclude': lambda f: f is None }}) + r"""The offset to start fetching feedback from.""" + limit: Optional[int] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('limit'), 'exclude': lambda f: f is None }}) + r"""The number of feedback to fetch.""" + completion_id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completion_id'), 'exclude': lambda f: f is None }}) + r"""The completion id to fetch feedback for.""" + task_id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('task_id'), 'exclude': lambda f: f is None }}) + r"""The task id to fetch feedback for.""" + + + + +@dataclasses.dataclass +class ListRequest: + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + request_body: Optional[ListRequestBody] = dataclasses.field(default=None, metadata={'request': { 'media_type': 'application/json' }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ListResponseBody: + r"""OK""" + feedback: Optional[List[components_feedback.Feedback]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('feedback'), 'exclude': lambda f: f is None }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ListResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + object: Optional[ListResponseBody] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/listfeedbacktasks.py b/src/log10/models/operations/listfeedbacktasks.py new file mode 100644 index 0000000..103af30 --- /dev/null +++ b/src/log10/models/operations/listfeedbacktasks.py @@ -0,0 +1,18 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import httpmetadata as components_httpmetadata +from ...models.components import task as components_task +from dataclasses_json import Undefined, dataclass_json +from typing import List, Optional + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ListFeedbackTasksResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + tasks: Optional[List[components_task.Task]] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/listungraded.py b/src/log10/models/operations/listungraded.py new file mode 100644 index 0000000..9e7709a --- /dev/null +++ b/src/log10/models/operations/listungraded.py @@ -0,0 +1,42 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import completion as components_completion +from ...models.components import httpmetadata as components_httpmetadata +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import List, Optional + + +@dataclasses.dataclass +class ListUngradedGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclasses.dataclass +class ListUngradedRequest: + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ListUngradedResponseBody: + r"""OK""" + completions: Optional[List[components_completion.Completion]] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completions'), 'exclude': lambda f: f is None }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class ListUngradedResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + object: Optional[ListUngradedResponseBody] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/update.py b/src/log10/models/operations/update.py new file mode 100644 index 0000000..ec1a2cb --- /dev/null +++ b/src/log10/models/operations/update.py @@ -0,0 +1,35 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import completion as components_completion +from ...models.components import httpmetadata as components_httpmetadata +from dataclasses_json import Undefined, dataclass_json +from typing import Optional + + +@dataclasses.dataclass +class UpdateGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclasses.dataclass +class UpdateRequest: + completion_id: str = dataclasses.field(metadata={'path_param': { 'field_name': 'completionId', 'style': 'simple', 'explode': False }}) + r"""The completion id to update.""" + completion: components_completion.Completion = dataclasses.field(metadata={'request': { 'media_type': 'application/json' }}) + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class UpdateResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + completion: Optional[components_completion.Completion] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/models/operations/upload.py b/src/log10/models/operations/upload.py new file mode 100644 index 0000000..42d1fdc --- /dev/null +++ b/src/log10/models/operations/upload.py @@ -0,0 +1,98 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from __future__ import annotations +import dataclasses +from ...models.components import feedback as components_feedback +from ...models.components import httpmetadata as components_httpmetadata +from dataclasses_json import Undefined, dataclass_json +from log10 import utils +from typing import List, Optional, Union + + +@dataclasses.dataclass +class UploadGlobals: + x_log10_organization: str = dataclasses.field(metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclasses.dataclass +class RequestBodyJSONValues: + r"""The values of the feedback. Must be valid JSON according to the task schema.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class Two: + task_id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('task_id') }}) + r"""The unique identifier for the task associated with this feedback.""" + json_values: RequestBodyJSONValues = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('json_values') }}) + r"""The values of the feedback. 
Must be valid JSON according to the task schema.""" + matched_completion_ids: List[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('matched_completion_ids') }}) + r"""The matched completion ids associated with this feedback.""" + comment: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('comment') }}) + r"""The comment associated with this feedback.""" + completion_ids: List[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completion_ids') }}) + r"""The completion ids to associate with this feedback.""" + id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id'), 'exclude': lambda f: f is None }}) + r"""The unique identifier for this feedback.""" + created_at_ms: Optional[float] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('created_at_ms'), 'exclude': lambda f: f is None }}) + r"""The epoch this schema was created.""" + completions_summary: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completions_summary'), 'exclude': lambda f: f is None }}) + + + + +@dataclasses.dataclass +class JSONValues: + r"""The values of the feedback. Must be valid JSON according to the task schema.""" + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class One: + task_id: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('task_id') }}) + r"""The unique identifier for the task associated with this feedback.""" + json_values: JSONValues = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('json_values') }}) + r"""The values of the feedback. Must be valid JSON according to the task schema.""" + matched_completion_ids: List[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('matched_completion_ids') }}) + r"""The matched completion ids associated with this feedback.""" + comment: str = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('comment') }}) + r"""The comment associated with this feedback.""" + completion_tags_selector: List[str] = dataclasses.field(metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completion_tags_selector') }}) + r"""The completion tags associated with this feedback.""" + id: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('id'), 'exclude': lambda f: f is None }}) + r"""The unique identifier for this feedback.""" + created_at_ms: Optional[float] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('created_at_ms'), 'exclude': lambda f: f is None }}) + r"""The epoch this schema was created.""" + completions_summary: Optional[str] = dataclasses.field(default=None, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('completions_summary'), 'exclude': lambda f: f is None }}) + allow_unmatched_feedback: Optional[bool] = dataclasses.field(default=False, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('allow_unmatched_feedback'), 'exclude': lambda f: f is None }}) + r"""Whether to allow unmatched feedback. 
Defaults to false.""" + max_matched_completions: Optional[int] = dataclasses.field(default=100, metadata={'dataclasses_json': { 'letter_case': utils.get_field_name('max_matched_completions'), 'exclude': lambda f: f is None }}) + r"""The maximum number of matched completions. Returns error if exceeded. Defaults to 100.""" + + + +UploadRequestBody = Union['One', 'Two'] + + +@dataclasses.dataclass +class UploadRequest: + request_body: UploadRequestBody = dataclasses.field(metadata={'request': { 'media_type': 'application/json' }}) + x_log10_organization: Optional[str] = dataclasses.field(default=None, metadata={'header': { 'field_name': 'X-Log10-Organization', 'style': 'simple', 'explode': False }}) + + + + +@dataclass_json(undefined=Undefined.EXCLUDE) +@dataclasses.dataclass +class UploadResponse: + http_meta: components_httpmetadata.HTTPMetadata = dataclasses.field(metadata={'dataclasses_json': { 'exclude': lambda f: True }}) + feedback: Optional[components_feedback.Feedback] = dataclasses.field(default=None) + r"""OK""" + + diff --git a/src/log10/sdk.py b/src/log10/sdk.py new file mode 100644 index 0000000..9d3203c --- /dev/null +++ b/src/log10/sdk.py @@ -0,0 +1,97 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +import requests as requests_http +from .completions import Completions +from .feedback import Feedback +from .feedbacktasks import FeedbackTasks +from .sdkconfiguration import SDKConfiguration +from .sessions import Sessions +from .utils.retries import RetryConfig +from log10 import utils +from log10._hooks import SDKHooks +from log10.models import components, internal +from typing import Callable, Dict, Optional, Union + +class Log10: + r"""Log10 Feedback API Spec: Log10 Feedback API Spec""" + completions: Completions + r"""Completions""" + sessions: Sessions + r"""Sessions""" + feedback: Feedback + r"""Feedback""" + feedback_tasks: FeedbackTasks + r"""FeedbackTasks""" + + sdk_configuration: SDKConfiguration + + def __init__(self, + log10_token: Union[Optional[str], Callable[[], Optional[str]]] = None, + x_log10_organization: str = None, + server_idx: Optional[int] = None, + server_url: Optional[str] = None, + url_params: Optional[Dict[str, str]] = None, + client: Optional[requests_http.Session] = None, + retry_config: Optional[RetryConfig] = None + ) -> None: + """Instantiates the SDK configuring it with the provided parameters. 
+ + :param log10_token: The log10_token required for authentication + :type log10_token: Union[Optional[str], Callable[[], Optional[str]]] + :param x_log10_organization: Configures the x_log10_organization parameter for all supported operations + :type x_log10_organization: str + :param server_idx: The index of the server to use for all operations + :type server_idx: int + :param server_url: The server URL to use for all operations + :type server_url: str + :param url_params: Parameters to optionally template the server URL with + :type url_params: Dict[str, str] + :param client: The requests.Session HTTP client to use for all operations + :type client: requests_http.Session + :param retry_config: The utils.RetryConfig to use globally + :type retry_config: RetryConfig + """ + if client is None: + client = requests_http.Session() + + if callable(log10_token): + def security(): + return components.Security(log10_token = log10_token()) + else: + security = components.Security(log10_token = log10_token) + + if server_url is not None: + if url_params is not None: + server_url = utils.template_url(server_url, url_params) + + _globals = internal.Globals( + x_log10_organization=x_log10_organization, + ) + + self.sdk_configuration = SDKConfiguration( + client, + _globals, + security, + server_url, + server_idx, + retry_config=retry_config + ) + + hooks = SDKHooks() + + current_server_url, *_ = self.sdk_configuration.get_server_details() + server_url, self.sdk_configuration.client = hooks.sdk_init(current_server_url, self.sdk_configuration.client) + if current_server_url != server_url: + self.sdk_configuration.server_url = server_url + + # pylint: disable=protected-access + self.sdk_configuration.__dict__['_hooks'] = hooks + + self._init_sdks() + + + def _init_sdks(self): + self.completions = Completions(self.sdk_configuration) + self.sessions = Sessions(self.sdk_configuration) + self.feedback = Feedback(self.sdk_configuration) + self.feedback_tasks = FeedbackTasks(self.sdk_configuration) diff --git a/src/log10/sdkconfiguration.py b/src/log10/sdkconfiguration.py new file mode 100644 index 0000000..290ca50 --- /dev/null +++ b/src/log10/sdkconfiguration.py @@ -0,0 +1,45 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + + +import requests as requests_http +from ._hooks import SDKHooks +from .utils import utils +from .utils.retries import RetryConfig +from dataclasses import dataclass +from log10.models import components, internal +from typing import Callable, Dict, Optional, Tuple, Union + + +SERVERS = [ + 'https://log10.io', +] +"""Contains the list of servers available to the SDK""" + +@dataclass +class SDKConfiguration: + client: requests_http.Session + globals: internal.Globals + security: Union[components.Security,Callable[[], components.Security]] = None + server_url: Optional[str] = '' + server_idx: Optional[int] = 0 + language: str = 'python' + openapi_doc_version: str = '1.0.0' + sdk_version: str = '0.0.1' + gen_version: str = '2.338.1' + user_agent: str = 'speakeasy-sdk/python 0.0.1 2.338.1 1.0.0 log10py' + retry_config: Optional[RetryConfig] = None + + def __post_init__(self): + self._hooks = SDKHooks() + + def get_server_details(self) -> Tuple[str, Dict[str, str]]: + if self.server_url is not None and self.server_url != '': + return utils.remove_suffix(self.server_url, '/'), {} + if self.server_idx is None: + self.server_idx = 0 + + return SERVERS[self.server_idx], {} + + + def get_hooks(self) -> SDKHooks: + return self._hooks diff --git a/src/log10/sessions.py b/src/log10/sessions.py new file mode 100644 index 0000000..f189ea0 --- /dev/null +++ b/src/log10/sessions.py @@ -0,0 +1,83 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +import requests as requests_http +from .sdkconfiguration import SDKConfiguration +from log10 import utils +from log10._hooks import AfterErrorContext, AfterSuccessContext, BeforeRequestContext, HookContext +from log10.models import components, errors, operations +from typing import Optional + +class Sessions: + r"""Sessions""" + sdk_configuration: SDKConfiguration + + def __init__(self, sdk_config: SDKConfiguration) -> None: + self.sdk_configuration = sdk_config + + + + def create(self, x_log10_organization: Optional[str] = None) -> operations.CreateSessionResponse: + r"""Create a session""" + hook_ctx = HookContext(operation_id='createSession', oauth2_scopes=[], security_source=self.sdk_configuration.security) + request = operations.CreateSessionRequest( + x_log10_organization=x_log10_organization, + ) + + _globals = operations.CreateSessionGlobals( + x_log10_organization=self.sdk_configuration.globals.x_log10_organization, + ) + + base_url = utils.template_url(*self.sdk_configuration.get_server_details()) + + url = utils.generate_url(base_url, '/api/v1/sessions', request, _globals) + + if callable(self.sdk_configuration.security): + headers, query_params = utils.get_security(self.sdk_configuration.security()) + else: + headers, query_params = utils.get_security(self.sdk_configuration.security) + + headers = { **utils.get_headers(request, _globals), **headers } + query_params = { **utils.get_query_params(request, _globals), **query_params } + headers['Accept'] = 'application/json' + headers['user-agent'] = self.sdk_configuration.user_agent + client = self.sdk_configuration.client + + try: + req = client.prepare_request(requests_http.Request('POST', url, params=query_params, headers=headers)) + req = self.sdk_configuration.get_hooks().before_request(BeforeRequestContext(hook_ctx), req) + http_res = client.send(req) + except Exception as e: + _, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), None, e) + if e is not None: + raise e + + if utils.match_status_codes(['4XX','5XX'], 
http_res.status_code): + result, e = self.sdk_configuration.get_hooks().after_error(AfterErrorContext(hook_ctx), http_res, None) + if e is not None: + raise e + if result is not None: + http_res = result + else: + http_res = self.sdk_configuration.get_hooks().after_success(AfterSuccessContext(hook_ctx), http_res) + + + + res = operations.CreateSessionResponse(http_meta=components.HTTPMetadata(request=req, response=http_res)) + + if http_res.status_code == 201: + # pylint: disable=no-else-return + if utils.match_content_type(http_res.headers.get('Content-Type') or '', 'application/json'): + out = utils.unmarshal_json(http_res.text, Optional[operations.CreateSessionResponseBody]) + res.object = out + else: + content_type = http_res.headers.get('Content-Type') + raise errors.SDKError(f'unknown content-type received: {content_type}', http_res.status_code, http_res.text, http_res) + elif http_res.status_code >= 400 and http_res.status_code < 500 or http_res.status_code >= 500 and http_res.status_code < 600: + raise errors.SDKError('API error occurred', http_res.status_code, http_res.text, http_res) + else: + raise errors.SDKError('unknown status code received', http_res.status_code, http_res.text, http_res) + + return res + + + diff --git a/src/log10/utils/__init__.py b/src/log10/utils/__init__.py new file mode 100644 index 0000000..94b7398 --- /dev/null +++ b/src/log10/utils/__init__.py @@ -0,0 +1,4 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.""" + +from .retries import * +from .utils import * diff --git a/src/log10/utils/retries.py b/src/log10/utils/retries.py new file mode 100644 index 0000000..c40fc41 --- /dev/null +++ b/src/log10/utils/retries.py @@ -0,0 +1,119 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +import random +import time +from typing import List + +import requests + + +class BackoffStrategy: + initial_interval: int + max_interval: int + exponent: float + max_elapsed_time: int + + def __init__(self, initial_interval: int, max_interval: int, exponent: float, max_elapsed_time: int): + self.initial_interval = initial_interval + self.max_interval = max_interval + self.exponent = exponent + self.max_elapsed_time = max_elapsed_time + + +class RetryConfig: + strategy: str + backoff: BackoffStrategy + retry_connection_errors: bool + + def __init__(self, strategy: str, backoff: BackoffStrategy, retry_connection_errors: bool): + self.strategy = strategy + self.backoff = backoff + self.retry_connection_errors = retry_connection_errors + + +class Retries: + config: RetryConfig + status_codes: List[str] + + def __init__(self, config: RetryConfig, status_codes: List[str]): + self.config = config + self.status_codes = status_codes + + +class TemporaryError(Exception): + response: requests.Response + + def __init__(self, response: requests.Response): + self.response = response + + +class PermanentError(Exception): + inner: Exception + + def __init__(self, inner: Exception): + self.inner = inner + + +def retry(func, retries: Retries): + if retries.config.strategy == 'backoff': + def do_request(): + res: requests.Response + try: + res = func() + + for code in retries.status_codes: + if "X" in code.upper(): + code_range = int(code[0]) + + status_major = res.status_code / 100 + + if status_major >= code_range and status_major < code_range + 1: + raise TemporaryError(res) + else: + parsed_code = int(code) + + if res.status_code == parsed_code: + raise TemporaryError(res) + except requests.exceptions.ConnectionError as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except requests.exceptions.Timeout as exception: + if retries.config.retry_connection_errors: + raise + + raise PermanentError(exception) from exception + except TemporaryError: + raise + except Exception as exception: + raise PermanentError(exception) from exception + + return res + + return retry_with_backoff(do_request, retries.config.backoff.initial_interval, retries.config.backoff.max_interval, retries.config.backoff.exponent, retries.config.backoff.max_elapsed_time) + + return func() + + +def retry_with_backoff(func, initial_interval=500, max_interval=60000, exponent=1.5, max_elapsed_time=3600000): + start = round(time.time()*1000) + retries = 0 + + while True: + try: + return func() + except PermanentError as exception: + raise exception.inner + except Exception as exception: # pylint: disable=broad-exception-caught + now = round(time.time()*1000) + if now - start > max_elapsed_time: + if isinstance(exception, TemporaryError): + return exception.response + + raise + sleep = ((initial_interval/1000) * + exponent**retries + random.uniform(0, 1)) + sleep = min(sleep, max_interval / 1000) + time.sleep(sleep) + retries += 1 diff --git a/src/log10/utils/utils.py b/src/log10/utils/utils.py new file mode 100644 index 0000000..c263197 --- /dev/null +++ b/src/log10/utils/utils.py @@ -0,0 +1,1087 @@ +"""Code generated by Speakeasy (https://speakeasyapi.dev). 
DO NOT EDIT.""" + +import base64 +import json +import re +import sys +from dataclasses import Field, fields, is_dataclass, make_dataclass +from datetime import date, datetime +from decimal import Decimal +from email.message import Message +from enum import Enum +from typing import ( + Any, + Callable, + Dict, + List, + Optional, + Tuple, + Union, + get_args, + get_origin, +) +from xmlrpc.client import boolean +from typing_inspect import is_optional_type +import dateutil.parser +from dataclasses_json import DataClassJsonMixin + + +def get_security(security: Any) -> Tuple[Dict[str, str], Dict[str, str]]: + headers: Dict[str, str] = {} + query_params: Dict[str, str] = {} + + if security is None: + return headers, query_params + + sec_fields: Tuple[Field, ...] = fields(security) + for sec_field in sec_fields: + value = getattr(security, sec_field.name) + if value is None: + continue + + metadata = sec_field.metadata.get("security") + if metadata is None: + continue + if metadata.get("option"): + _parse_security_option(headers, query_params, value) + return headers, query_params + if metadata.get("scheme"): + # Special case for basic auth which could be a flattened struct + if metadata.get("sub_type") == "basic" and not is_dataclass(value): + _parse_security_scheme(headers, query_params, metadata, security) + else: + _parse_security_scheme(headers, query_params, metadata, value) + + return headers, query_params + + +def _parse_security_option( + headers: Dict[str, str], query_params: Dict[str, str], option: Any +): + opt_fields: Tuple[Field, ...] = fields(option) + for opt_field in opt_fields: + metadata = opt_field.metadata.get("security") + if metadata is None or metadata.get("scheme") is None: + continue + _parse_security_scheme( + headers, query_params, metadata, getattr(option, opt_field.name) + ) + + +def _parse_security_scheme( + headers: Dict[str, str], + query_params: Dict[str, str], + scheme_metadata: Dict, + scheme: Any, +): + scheme_type = scheme_metadata.get("type") + sub_type = scheme_metadata.get("sub_type") + + if is_dataclass(scheme): + if scheme_type == "http" and sub_type == "basic": + _parse_basic_auth_scheme(headers, scheme) + return + + scheme_fields: Tuple[Field, ...] 
= fields(scheme) + for scheme_field in scheme_fields: + metadata = scheme_field.metadata.get("security") + if metadata is None or metadata.get("field_name") is None: + continue + + value = getattr(scheme, scheme_field.name) + + _parse_security_scheme_value( + headers, query_params, scheme_metadata, metadata, value + ) + else: + _parse_security_scheme_value( + headers, query_params, scheme_metadata, scheme_metadata, scheme + ) + + +def _parse_security_scheme_value( + headers: Dict[str, str], + query_params: Dict[str, str], + scheme_metadata: Dict, + security_metadata: Dict, + value: Any, +): + scheme_type = scheme_metadata.get("type") + sub_type = scheme_metadata.get("sub_type") + + header_name = str(security_metadata.get("field_name")) + + if scheme_type == "apiKey": + if sub_type == "header": + headers[header_name] = value + elif sub_type == "query": + query_params[header_name] = value + else: + raise Exception("not supported") + elif scheme_type == "openIdConnect": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "oauth2": + if sub_type != "client_credentials": + headers[header_name] = _apply_bearer(value) + elif scheme_type == "http": + if sub_type == "bearer": + headers[header_name] = _apply_bearer(value) + else: + raise Exception("not supported") + else: + raise Exception("not supported") + + +def _apply_bearer(token: str) -> str: + return token.lower().startswith("bearer ") and token or f"Bearer {token}" + + +def _parse_basic_auth_scheme(headers: Dict[str, str], scheme: Any): + username = "" + password = "" + + scheme_fields: Tuple[Field, ...] = fields(scheme) + for scheme_field in scheme_fields: + metadata = scheme_field.metadata.get("security") + if metadata is None or metadata.get("field_name") is None: + continue + + field_name = metadata.get("field_name") + value = getattr(scheme, scheme_field.name) + + if field_name == "username": + username = value + if field_name == "password": + password = value + + data = f"{username}:{password}".encode() + headers["Authorization"] = f"Basic {base64.b64encode(data).decode()}" + + +def generate_url( + server_url: str, + path: str, + path_params: Any, + gbls: Optional[Any] = None, +) -> str: + path_param_values: Dict[str, str] = {} + + globals_already_populated = _populate_path_params( + path_params, gbls, path_param_values, [] + ) + if gbls is not None: + _populate_path_params(gbls, None, path_param_values, globals_already_populated) + + for key, value in path_param_values.items(): + path = path.replace("{" + key + "}", value, 1) + + return remove_suffix(server_url, "/") + path + + +def _populate_path_params( + path_params: Any, + gbls: Any, + path_param_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + path_param_fields: Tuple[Field, ...] 
= fields(path_params) + for field in path_param_fields: + if field.name in skip_fields: + continue + + param_metadata = field.metadata.get("path_param") + if param_metadata is None: + continue + + param = getattr(path_params, field.name) if path_params is not None else None + param, global_found = _populate_from_globals( + field.name, param, "path_param", gbls + ) + if global_found: + globals_already_populated.append(field.name) + + if param is None: + continue + + f_name = param_metadata.get("field_name", field.name) + serialization = param_metadata.get("serialization", "") + if serialization != "": + serialized_params = _get_serialized_params( + param_metadata, field.type, f_name, param + ) + for key, value in serialized_params.items(): + path_param_values[key] = value + else: + if param_metadata.get("style", "simple") == "simple": + if isinstance(param, List): + pp_vals: List[str] = [] + for pp_val in param: + if pp_val is None: + continue + pp_vals.append(_val_to_string(pp_val)) + path_param_values[param_metadata.get("field_name", field.name)] = ( + ",".join(pp_vals) + ) + elif isinstance(param, Dict): + pp_vals: List[str] = [] + for pp_key in param: + if param[pp_key] is None: + continue + if param_metadata.get("explode"): + pp_vals.append(f"{pp_key}={_val_to_string(param[pp_key])}") + else: + pp_vals.append(f"{pp_key},{_val_to_string(param[pp_key])}") + path_param_values[param_metadata.get("field_name", field.name)] = ( + ",".join(pp_vals) + ) + elif not isinstance(param, (str, int, float, complex, bool, Decimal)): + pp_vals: List[str] = [] + param_fields: Tuple[Field, ...] = fields(param) + for param_field in param_fields: + param_value_metadata = param_field.metadata.get("path_param") + if not param_value_metadata: + continue + + param_name = param_value_metadata.get("field_name", field.name) + + param_field_val = getattr(param, param_field.name) + if param_field_val is None: + continue + if param_metadata.get("explode"): + pp_vals.append( + f"{param_name}={_val_to_string(param_field_val)}" + ) + else: + pp_vals.append( + f"{param_name},{_val_to_string(param_field_val)}" + ) + path_param_values[param_metadata.get("field_name", field.name)] = ( + ",".join(pp_vals) + ) + else: + path_param_values[param_metadata.get("field_name", field.name)] = ( + _val_to_string(param) + ) + + return globals_already_populated + + +def is_optional(field): + return get_origin(field) is Union and type(None) in get_args(field) + + +def template_url(url_with_params: str, params: Dict[str, str]) -> str: + for key, value in params.items(): + url_with_params = url_with_params.replace("{" + key + "}", value) + + return url_with_params + + +def get_query_params( + query_params: Any, + gbls: Optional[Any] = None, +) -> Dict[str, List[str]]: + params: Dict[str, List[str]] = {} + + globals_already_populated = _populate_query_params(query_params, gbls, params, []) + if gbls is not None: + _populate_query_params(gbls, None, params, globals_already_populated) + + return params + + +def _populate_query_params( + query_params: Any, + gbls: Any, + query_param_values: Dict[str, List[str]], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + param_fields: Tuple[Field, ...] 
= fields(query_params) + for field in param_fields: + if field.name in skip_fields: + continue + + metadata = field.metadata.get("query_param") + if not metadata: + continue + + param_name = field.name + value = getattr(query_params, param_name) if query_params is not None else None + + value, global_found = _populate_from_globals( + param_name, value, "query_param", gbls + ) + if global_found: + globals_already_populated.append(param_name) + + f_name = metadata.get("field_name") + serialization = metadata.get("serialization", "") + if serialization != "": + serialized_parms = _get_serialized_params( + metadata, field.type, f_name, value + ) + for key, value in serialized_parms.items(): + if key in query_param_values: + query_param_values[key].extend(value) + else: + query_param_values[key] = [value] + else: + style = metadata.get("style", "form") + if style == "deepObject": + _populate_deep_object_query_params( + metadata, f_name, value, query_param_values + ) + elif style == "form": + _populate_delimited_query_params( + metadata, f_name, value, ",", query_param_values + ) + elif style == "pipeDelimited": + _populate_delimited_query_params( + metadata, f_name, value, "|", query_param_values + ) + else: + raise Exception("not yet implemented") + + return globals_already_populated + + +def get_headers(headers_params: Any, gbls: Optional[Any] = None) -> Dict[str, str]: + headers: Dict[str, str] = {} + + globals_already_populated = [] + if headers_params is not None: + globals_already_populated = _populate_headers(headers_params, gbls, headers, []) + if gbls is not None: + _populate_headers(gbls, None, headers, globals_already_populated) + + return headers + + +def _populate_headers( + headers_params: Any, + gbls: Any, + header_values: Dict[str, str], + skip_fields: List[str], +) -> List[str]: + globals_already_populated: List[str] = [] + + param_fields: Tuple[Field, ...] = fields(headers_params) + for field in param_fields: + if field.name in skip_fields: + continue + + metadata = field.metadata.get("header") + if not metadata: + continue + + value, global_found = _populate_from_globals( + field.name, getattr(headers_params, field.name), "header", gbls + ) + if global_found: + globals_already_populated.append(field.name) + value = _serialize_header(metadata.get("explode", False), value) + + if value != "": + header_values[metadata.get("field_name", field.name)] = value + + return globals_already_populated + + +def _get_serialized_params( + metadata: Dict, field_type: type, field_name: str, obj: Any +) -> Dict[str, str]: + params: Dict[str, str] = {} + + serialization = metadata.get("serialization", "") + if serialization == "json": + params[metadata.get("field_name", field_name)] = marshal_json(obj, field_type) + + return params + + +def _populate_deep_object_query_params( + metadata: Dict, field_name: str, obj: Any, params: Dict[str, List[str]] +): + if obj is None: + return + + if is_dataclass(obj): + obj_fields: Tuple[Field, ...] 
= fields(obj) + for obj_field in obj_fields: + obj_param_metadata = obj_field.metadata.get("query_param") + if not obj_param_metadata: + continue + + obj_val = getattr(obj, obj_field.name) + if obj_val is None: + continue + + if isinstance(obj_val, List): + for val in obj_val: + if val is None: + continue + + if ( + params.get( + f'{metadata.get("field_name", field_name)}[{obj_param_metadata.get("field_name", obj_field.name)}]' + ) + is None + ): + params[ + f'{metadata.get("field_name", field_name)}[{obj_param_metadata.get("field_name", obj_field.name)}]' + ] = [] + + params[ + f'{metadata.get("field_name", field_name)}[{obj_param_metadata.get("field_name", obj_field.name)}]' + ].append(_val_to_string(val)) + else: + params[ + f'{metadata.get("field_name", field_name)}[{obj_param_metadata.get("field_name", obj_field.name)}]' + ] = [_val_to_string(obj_val)] + elif isinstance(obj, Dict): + for key, value in obj.items(): + if value is None: + continue + + if isinstance(value, List): + for val in value: + if val is None: + continue + + if ( + params.get(f'{metadata.get("field_name", field_name)}[{key}]') + is None + ): + params[f'{metadata.get("field_name", field_name)}[{key}]'] = [] + + params[f'{metadata.get("field_name", field_name)}[{key}]'].append( + _val_to_string(val) + ) + else: + params[f'{metadata.get("field_name", field_name)}[{key}]'] = [ + _val_to_string(value) + ] + + +def _get_query_param_field_name(obj_field: Field) -> str: + obj_param_metadata = obj_field.metadata.get("query_param") + + if not obj_param_metadata: + return "" + + return obj_param_metadata.get("field_name", obj_field.name) + + +def _populate_delimited_query_params( + metadata: Dict, + field_name: str, + obj: Any, + delimiter: str, + query_param_values: Dict[str, List[str]], +): + _populate_form( + field_name, + metadata.get("explode", True), + obj, + _get_query_param_field_name, + delimiter, + query_param_values, + ) + + +SERIALIZATION_METHOD_TO_CONTENT_TYPE = { + "json": "application/json", + "form": "application/x-www-form-urlencoded", + "multipart": "multipart/form-data", + "raw": "application/octet-stream", + "string": "text/plain", +} + + +def serialize_request_body( + request: Any, + request_type: type, + request_field_name: str, + nullable: bool, + optional: bool, + serialization_method: str, + encoder=None, +) -> Tuple[Optional[str], Optional[Any], Optional[Any]]: + if request is None: + if not nullable and optional: + return None, None, None + + if not is_dataclass(request) or not hasattr(request, request_field_name): + return serialize_content_type( + request_field_name, + request_type, + SERIALIZATION_METHOD_TO_CONTENT_TYPE[serialization_method], + request, + encoder, + ) + + request_val = getattr(request, request_field_name) + + if request_val is None: + if not nullable and optional: + return None, None, None + + request_fields: Tuple[Field, ...] 
= fields(request) + request_metadata = None + + for field in request_fields: + if field.name == request_field_name: + request_metadata = field.metadata.get("request") + break + + if request_metadata is None: + raise Exception("invalid request type") + + return serialize_content_type( + request_field_name, + request_type, + request_metadata.get("media_type", "application/octet-stream"), + request_val, + ) + + +def serialize_content_type( + field_name: str, request_type: Any, media_type: str, request: Any, encoder=None +) -> Tuple[Optional[str], Optional[Any], Optional[List[List[Any]]]]: + if re.match(r"(application|text)\/.*?\+*json.*", media_type) is not None: + return media_type, marshal_json(request, request_type, encoder), None + if re.match(r"multipart\/.*", media_type) is not None: + return serialize_multipart_form(media_type, request) + if re.match(r"application\/x-www-form-urlencoded.*", media_type) is not None: + return media_type, serialize_form_data(field_name, request), None + if isinstance(request, (bytes, bytearray)): + return media_type, request, None + if isinstance(request, str): + return media_type, request, None + + raise Exception( + f"invalid request body type {type(request)} for mediaType {media_type}" + ) + + +def serialize_multipart_form( + media_type: str, request: Any +) -> Tuple[str, Any, List[List[Any]]]: + form: List[List[Any]] = [] + request_fields = fields(request) + + for field in request_fields: + val = getattr(request, field.name) + if val is None: + continue + + field_metadata = field.metadata.get("multipart_form") + if not field_metadata: + continue + + if field_metadata.get("file") is True: + file_fields = fields(val) + + file_name = "" + field_name = "" + content = bytes() + + for file_field in file_fields: + file_metadata = file_field.metadata.get("multipart_form") + if file_metadata is None: + continue + + if file_metadata.get("content") is True: + content = getattr(val, file_field.name) + else: + field_name = file_metadata.get("field_name", file_field.name) + file_name = getattr(val, file_field.name) + if field_name == "" or file_name == "" or content == bytes(): + raise Exception("invalid multipart/form-data file") + + form.append([field_name, [file_name, content]]) + elif field_metadata.get("json") is True: + to_append = [ + field_metadata.get("field_name", field.name), + [None, marshal_json(val, field.type), "application/json"], + ] + form.append(to_append) + else: + field_name = field_metadata.get("field_name", field.name) + if isinstance(val, List): + for value in val: + if value is None: + continue + form.append([field_name + "[]", [None, _val_to_string(value)]]) + else: + form.append([field_name, [None, _val_to_string(val)]]) + return media_type, None, form + + +def serialize_dict( + original: Dict, explode: bool, field_name, existing: Optional[Dict[str, List[str]]] +) -> Dict[str, List[str]]: + if existing is None: + existing = {} + + if explode is True: + for key, val in original.items(): + if key not in existing: + existing[key] = [] + existing[key].append(val) + else: + temp = [] + for key, val in original.items(): + temp.append(str(key)) + temp.append(str(val)) + if field_name not in existing: + existing[field_name] = [] + existing[field_name].append(",".join(temp)) + return existing + + +def serialize_form_data(field_name: str, data: Any) -> Dict[str, Any]: + form: Dict[str, List[str]] = {} + + if is_dataclass(data): + for field in fields(data): + val = getattr(data, field.name) + if val is None: + continue + + metadata = 
field.metadata.get("form") + if metadata is None: + continue + + field_name = metadata.get("field_name", field.name) + + if metadata.get("json"): + form[field_name] = [marshal_json(val, field.type)] + else: + if metadata.get("style", "form") == "form": + _populate_form( + field_name, + metadata.get("explode", True), + val, + _get_form_field_name, + ",", + form, + ) + else: + raise Exception(f"Invalid form style for field {field.name}") + elif isinstance(data, Dict): + for key, value in data.items(): + form[key] = [_val_to_string(value)] + else: + raise Exception(f"Invalid request body type for field {field_name}") + + return form + + +def _get_form_field_name(obj_field: Field) -> str: + obj_param_metadata = obj_field.metadata.get("form") + + if not obj_param_metadata: + return "" + + return obj_param_metadata.get("field_name", obj_field.name) + + +def _populate_form( + field_name: str, + explode: boolean, + obj: Any, + get_field_name_func: Callable, + delimiter: str, + form: Dict[str, List[str]], +): + if obj is None: + return form + + if is_dataclass(obj): + items = [] + + obj_fields: Tuple[Field, ...] = fields(obj) + for obj_field in obj_fields: + obj_field_name = get_field_name_func(obj_field) + if obj_field_name == "": + continue + + val = getattr(obj, obj_field.name) + if val is None: + continue + + if explode: + form[obj_field_name] = [_val_to_string(val)] + else: + items.append(f"{obj_field_name}{delimiter}{_val_to_string(val)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, Dict): + items = [] + for key, value in obj.items(): + if value is None: + continue + + if explode: + form[key] = [_val_to_string(value)] + else: + items.append(f"{key}{delimiter}{_val_to_string(value)}") + + if len(items) > 0: + form[field_name] = [delimiter.join(items)] + elif isinstance(obj, List): + items = [] + + for value in obj: + if value is None: + continue + + if explode: + if not field_name in form: + form[field_name] = [] + form[field_name].append(_val_to_string(value)) + else: + items.append(_val_to_string(value)) + + if len(items) > 0: + form[field_name] = [delimiter.join([str(item) for item in items])] + else: + form[field_name] = [_val_to_string(obj)] + + return form + + +def _serialize_header(explode: bool, obj: Any) -> str: + if obj is None: + return "" + + if is_dataclass(obj): + items = [] + obj_fields: Tuple[Field, ...] 
+def _serialize_header(explode: bool, obj: Any) -> str:
+    if obj is None:
+        return ""
+
+    if is_dataclass(obj):
+        items = []
+        obj_fields: Tuple[Field, ...] = fields(obj)
+        for obj_field in obj_fields:
+            obj_param_metadata = obj_field.metadata.get("header")
+
+            if not obj_param_metadata:
+                continue
+
+            obj_field_name = obj_param_metadata.get("field_name", obj_field.name)
+            if obj_field_name == "":
+                continue
+
+            val = getattr(obj, obj_field.name)
+            if val is None:
+                continue
+
+            if explode:
+                items.append(f"{obj_field_name}={_val_to_string(val)}")
+            else:
+                items.append(obj_field_name)
+                items.append(_val_to_string(val))
+
+        if len(items) > 0:
+            return ",".join(items)
+    elif isinstance(obj, Dict):
+        items = []
+
+        for key, value in obj.items():
+            if value is None:
+                continue
+
+            if explode:
+                items.append(f"{key}={_val_to_string(value)}")
+            else:
+                items.append(key)
+                items.append(_val_to_string(value))
+
+        if len(items) > 0:
+            return ",".join([str(item) for item in items])
+    elif isinstance(obj, List):
+        items = []
+
+        for value in obj:
+            if value is None:
+                continue
+
+            items.append(_val_to_string(value))
+
+        if len(items) > 0:
+            return ",".join(items)
+    else:
+        return f"{_val_to_string(obj)}"
+
+    return ""
+
+
+def unmarshal_json(data, typ, decoder=None, infer_missing=False):
+    unmarshal = make_dataclass("Unmarshal", [("res", typ)], bases=(DataClassJsonMixin,))
+    json_dict = json.loads(data)
+    try:
+        out = unmarshal.from_dict({"res": json_dict}, infer_missing=infer_missing)
+    except AttributeError as attr_err:
+        raise AttributeError(
+            f"unable to unmarshal {data} as {typ} - {attr_err}"
+        ) from attr_err
+
+    return out.res if decoder is None else decoder(out.res)
+
+
+def marshal_json(val, typ, encoder=None):
+    if not is_optional_type(typ) and val is None:
+        raise ValueError(f"Could not marshal None into non-optional type: {typ}")
+
+    marshal = make_dataclass("Marshal", [("res", typ)], bases=(DataClassJsonMixin,))
+    marshaller = marshal(res=val)
+    json_dict = marshaller.to_dict()
+    val = json_dict["res"] if encoder is None else encoder(json_dict["res"])
+
+    return json.dumps(val, separators=(",", ":"), sort_keys=True)
+
+
+def match_content_type(content_type: str, pattern: str) -> bool:
+    if pattern in (content_type, "*", "*/*"):
+        return True
+
+    msg = Message()
+    msg["content-type"] = content_type
+    media_type = msg.get_content_type()
+
+    if media_type == pattern:
+        return True
+
+    parts = media_type.split("/")
+    if len(parts) == 2:
+        if pattern in (f"{parts[0]}/*", f"*/{parts[1]}"):
+            return True
+
+    return False
+
+
+def match_status_codes(status_codes: List[str], status_code: int) -> bool:
+    for code in status_codes:
+        if code == str(status_code):
+            return True
+
+        # Wildcard ranges such as "4XX" match on the leading digit.
+        if code.endswith("XX") and code.startswith(str(status_code)[:1]):
+            return True
+    return False
+
+
+def datetimeisoformat(optional: bool):
+    def isoformatoptional(val):
+        if optional and val is None:
+            return None
+        return _val_to_string(val)
+
+    return isoformatoptional
+
+
+def dateisoformat(optional: bool):
+    def isoformatoptional(val):
+        if optional and val is None:
+            return None
+        return date.isoformat(val)
+
+    return isoformatoptional
+
+
+def datefromisoformat(date_str: str):
+    return dateutil.parser.parse(date_str).date()
+
+
+def bigintencoder(optional: bool):
+    def bigintencode(val: int):
+        if optional and val is None:
+            return None
+        return str(val)
+
+    return bigintencode
+
+
+def bigintdecoder(val):
+    if isinstance(val, float):
+        raise ValueError(f"{val} is a float")
+    return int(val)
+
+
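+# Illustrative sketch: match_status_codes accepts exact codes and "XX" ranges.
+#
+#     match_status_codes(["200", "4XX"], 404)  # -> True
+#     match_status_codes(["200", "4XX"], 500)  # -> False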
+def integerstrencoder(optional: bool):
+    def integerstrencode(val: int):
+        if optional and val is None:
+            return None
+        return str(val)
+
+    return integerstrencode
+
+
+def integerstrdecoder(val):
+    if isinstance(val, float):
+        raise ValueError(f"{val} is a float")
+    return int(val)
+
+
+def numberstrencoder(optional: bool):
+    def numberstrencode(val: float):
+        if optional and val is None:
+            return None
+        return str(val)
+
+    return numberstrencode
+
+
+def numberstrdecoder(val):
+    return float(val)
+
+
+def decimalencoder(optional: bool, as_str: bool):
+    def decimalencode(val: Decimal):
+        if optional and val is None:
+            return None
+
+        if as_str:
+            return str(val)
+
+        return float(val)
+
+    return decimalencode
+
+
+def decimaldecoder(val):
+    return Decimal(str(val))
+
+
+def map_encoder(optional: bool, value_encoder: Callable):
+    def map_encode(val: Dict):
+        if optional and val is None:
+            return None
+        return {key: value_encoder(value) for key, value in val.items()}
+
+    return map_encode
+
+
+def map_decoder(value_decoder: Callable):
+    def map_decode(val: Dict):
+        return {key: value_decoder(value) for key, value in val.items()}
+
+    return map_decode
+
+
+def list_encoder(optional: bool, value_encoder: Callable):
+    def list_encode(val: List):
+        if optional and val is None:
+            return None
+        return [value_encoder(value) for value in val]
+
+    return list_encode
+
+
+def list_decoder(value_decoder: Callable):
+    def list_decode(val: List):
+        return [value_decoder(value) for value in val]
+
+    return list_decode
+
+
+def union_encoder(all_encoders: Dict[type, Callable]):
+    def selective_encoder(val: Any):
+        if type(val) in all_encoders:
+            return all_encoders[type(val)](val)
+        return val
+
+    return selective_encoder
+
+
+def union_decoder(all_decoders: List[Callable]):
+    def selective_decoder(val: Any):
+        decoded = val
+        for decoder in all_decoders:
+            try:
+                decoded = decoder(val)
+                break
+            except (TypeError, ValueError):
+                continue
+        return decoded
+
+    return selective_decoder
+
+
+def get_field_name(name):
+    def override(_, _field_name=name):
+        return _field_name
+
+    return override
+
+
+def _val_to_string(val) -> str:
+    if isinstance(val, bool):
+        return str(val).lower()
+    if isinstance(val, datetime):
+        return str(val.isoformat().replace("+00:00", "Z"))
+    if isinstance(val, Enum):
+        return str(val.value)
+
+    return str(val)
+
+
+def _populate_from_globals(
+    param_name: str, value: Any, param_type: str, gbls: Any
+) -> Tuple[Any, bool]:
+    if gbls is None:
+        return value, False
+
+    global_fields = fields(gbls)
+
+    found = False
+    for field in global_fields:
+        if field.name != param_name:
+            continue
+
+        found = True
+
+        if value is not None:
+            return value, True
+
+        global_value = getattr(gbls, field.name)
+
+        param_metadata = field.metadata.get(param_type)
+        if param_metadata is None:
+            return value, True
+
+        return global_value, True
+
+    return value, found
+
+
+def decoder_with_discriminator(field_name):
+    def decode_fx(obj):
+        # Resolve the concrete class by name from this SDK's components module.
+        kls = getattr(sys.modules["log10.models.components"], obj[field_name])
+        return unmarshal_json(json.dumps(obj), kls)
+
+    return decode_fx
+
+
+def remove_suffix(input_string, suffix):
+    if suffix and input_string.endswith(suffix):
+        return input_string[: -len(suffix)]
+    return input_string
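+
+
+# Usage sketch (illustrative): these helpers normalize values for the wire.
+# The import path is an assumption, not confirmed by this patch:
+#
+#     from log10.utils.utils import _val_to_string, decimalencoder
+#
+#     _val_to_string(True)  # -> "true"
+#     _val_to_string(datetime(2024, 5, 24, tzinfo=timezone.utc))
+#     # -> "2024-05-24T00:00:00Z"
+#     decimalencoder(False, as_str=True)(Decimal("1.5"))  # -> "1.5"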