mirror of
https://github.com/LukeHagar/log10go.git
synced 2025-12-06 04:20:12 +00:00
198 lines
5.1 KiB
Go
// Code generated by Speakeasy (https://speakeasyapi.dev). DO NOT EDIT.
|
|
|
|
package components
|
|
|
|
import (
|
|
"encoding/json"
|
|
"fmt"
|
|
)
|
|
|
|
// FinishReason - The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
// `length` if the maximum number of tokens specified in the request was reached,
// `content_filter` if content was omitted due to a flag from our content filters,
// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.
type FinishReason string

// The complete set of finish reasons the API may report.
const (
	FinishReasonStop          FinishReason = "stop"
	FinishReasonLength        FinishReason = "length"
	FinishReasonToolCalls     FinishReason = "tool_calls"
	FinishReasonContentFilter FinishReason = "content_filter"
	FinishReasonFunctionCall  FinishReason = "function_call"
)

// ToPointer returns a pointer to a copy of the value, convenient for
// populating optional (pointer-typed) fields.
func (e FinishReason) ToPointer() *FinishReason {
	v := e
	return &v
}

// UnmarshalJSON decodes a JSON string into e, rejecting any value that is
// not one of the known finish reasons.
func (e *FinishReason) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	switch v {
	case "stop", "length", "tool_calls", "content_filter", "function_call":
		*e = FinishReason(v)
		return nil
	default:
		return fmt.Errorf("invalid value for FinishReason: %v", v)
	}
}
|
|
|
|
// Logprobs - Log probability information for the choice.
|
|
type Logprobs struct {
|
|
// A list of message content tokens with log probability information.
|
|
Content []ChatCompletionTokenLogprob `json:"content"`
|
|
}
|
|
|
|
func (o *Logprobs) GetContent() []ChatCompletionTokenLogprob {
|
|
if o == nil {
|
|
return nil
|
|
}
|
|
return o.Content
|
|
}
|
|
|
|
type Choices struct {
|
|
// The reason the model stopped generating tokens. This will be `stop` if the model hit a natural stop point or a provided stop sequence,
|
|
// `length` if the maximum number of tokens specified in the request was reached,
|
|
// `content_filter` if content was omitted due to a flag from our content filters,
|
|
// `tool_calls` if the model called a tool, or `function_call` (deprecated) if the model called a function.
|
|
//
|
|
FinishReason FinishReason `json:"finish_reason"`
|
|
// The index of the choice in the list of choices.
|
|
Index int64 `json:"index"`
|
|
// A chat completion message generated by the model.
|
|
Message ChatCompletionResponseMessage `json:"message"`
|
|
// Log probability information for the choice.
|
|
Logprobs *Logprobs `json:"logprobs"`
|
|
}
|
|
|
|
func (o *Choices) GetFinishReason() FinishReason {
|
|
if o == nil {
|
|
return FinishReason("")
|
|
}
|
|
return o.FinishReason
|
|
}
|
|
|
|
func (o *Choices) GetIndex() int64 {
|
|
if o == nil {
|
|
return 0
|
|
}
|
|
return o.Index
|
|
}
|
|
|
|
func (o *Choices) GetMessage() ChatCompletionResponseMessage {
|
|
if o == nil {
|
|
return ChatCompletionResponseMessage{}
|
|
}
|
|
return o.Message
|
|
}
|
|
|
|
func (o *Choices) GetLogprobs() *Logprobs {
|
|
if o == nil {
|
|
return nil
|
|
}
|
|
return o.Logprobs
|
|
}
|
|
|
|
// Object - The object type, which is always `chat.completion`.
type Object string

const (
	ObjectChatCompletion Object = "chat.completion"
)

// ToPointer returns a pointer to a copy of the value, convenient for
// populating optional (pointer-typed) fields.
func (e Object) ToPointer() *Object {
	v := e
	return &v
}

// UnmarshalJSON decodes a JSON string into e, rejecting anything other than
// "chat.completion".
func (e *Object) UnmarshalJSON(data []byte) error {
	var v string
	if err := json.Unmarshal(data, &v); err != nil {
		return err
	}
	if v == "chat.completion" {
		*e = Object(v)
		return nil
	}
	return fmt.Errorf("invalid value for Object: %v", v)
}
|
|
|
|
// CreateChatCompletionResponse - Represents a chat completion response returned by model, based on the provided input.
|
|
type CreateChatCompletionResponse struct {
|
|
// A unique identifier for the chat completion.
|
|
ID string `json:"id"`
|
|
// A list of chat completion choices. Can be more than one if `n` is greater than 1.
|
|
Choices []Choices `json:"choices"`
|
|
// The Unix timestamp (in seconds) of when the chat completion was created.
|
|
Created int64 `json:"created"`
|
|
// The model used for the chat completion.
|
|
Model string `json:"model"`
|
|
// This fingerprint represents the backend configuration that the model runs with.
|
|
//
|
|
// Can be used in conjunction with the `seed` request parameter to understand when backend changes have been made that might impact determinism.
|
|
//
|
|
SystemFingerprint *string `json:"system_fingerprint,omitempty"`
|
|
// The object type, which is always `chat.completion`.
|
|
Object Object `json:"object"`
|
|
// Usage statistics for the completion request.
|
|
Usage *CompletionUsage `json:"usage,omitempty"`
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetID() string {
|
|
if o == nil {
|
|
return ""
|
|
}
|
|
return o.ID
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetChoices() []Choices {
|
|
if o == nil {
|
|
return []Choices{}
|
|
}
|
|
return o.Choices
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetCreated() int64 {
|
|
if o == nil {
|
|
return 0
|
|
}
|
|
return o.Created
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetModel() string {
|
|
if o == nil {
|
|
return ""
|
|
}
|
|
return o.Model
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetSystemFingerprint() *string {
|
|
if o == nil {
|
|
return nil
|
|
}
|
|
return o.SystemFingerprint
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetObject() Object {
|
|
if o == nil {
|
|
return Object("")
|
|
}
|
|
return o.Object
|
|
}
|
|
|
|
func (o *CreateChatCompletionResponse) GetUsage() *CompletionUsage {
|
|
if o == nil {
|
|
return nil
|
|
}
|
|
return o.Usage
|
|
}
|