Skip to content

Commit 602de55

Browse files
authored
Move AI SDK to opensource (#483)
## Summary Copies over the code for AI SDK to our open source repo. Once this is in, I will delete the AI SDK from Axiom so that it can start using the open source version. ## How was it tested? Ran the unit tests. ## Community Contribution License All community contributions in this pull request are licensed to the project maintainers under the terms of the [Apache 2 License](https://www.apache.org/licenses/LICENSE-2.0). By creating this pull request I represent that I have the right to license the contributions to the project maintainers under the Apache 2 License as stated in the [Community Contribution License](https://github.com/jetify-com/opensource/blob/main/CONTRIBUTING.md#community-contribution-license).
1 parent 1d9f65f commit 602de55

File tree

104 files changed

+22228
-1
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

104 files changed

+22228
-1
lines changed

.golangci.yml

+7-1
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,12 @@ issues:
4242
linters:
4343
- dupl
4444

45+
# Disable only cognitive-complexity check in test files
46+
- path: _test\.go
47+
linters:
48+
- revive
49+
text: "cognitive-complexity:"
50+
4551
# TODO(gcurtis): temporary until this file is used.
4652
- path: terminal.*\.go
4753
linters:
@@ -60,7 +66,7 @@ linters-settings:
6066
- name: bool-literal-in-expr
6167
- name: cognitive-complexity
6268
arguments:
63-
- 27
69+
- 30
6470
- name: datarace
6571
- name: duplicated-imports
6672
- name: early-return

aisdk/ai/aisdk.go

+199
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,199 @@
1+
package aisdk
2+
3+
import (
4+
"context"
5+
6+
"go.jetify.com/ai/api"
7+
)
8+
9+
// GenerateText generates a text response for a given prompt using a language model.
10+
// This function does not stream its output.
11+
//
12+
// It returns a [api.Response] containing the generated text, the results of
13+
// any tool calls, and additional information.
14+
//
15+
// It supports either a string argument, which will be converted to a
16+
// [api.UserMessage] with
17+
//
18+
// GenerateText(ctx, "Hello, world!")
19+
//
20+
// Or a series of [api.Message] arguments:
21+
//
22+
// GenerateText(ctx,
23+
// UserMessage("Show me a picture of a cat"),
24+
// AssistantMessage(
25+
// "Here is a picture of a cat",
26+
// ImageBlock{URL: "https://example.com/cat.png"},
27+
// ),
28+
// )
29+
//
30+
// The last argument can optionally be a series of [api.CallOption] arguments:
31+
//
32+
// GenerateText(ctx, "Hello, world!", WithMaxTokens(100))
33+
func GenerateText(ctx context.Context, args ...any) (api.Response, error) {
34+
llmArgs, err := toLLMArgs(args...)
35+
if err != nil {
36+
return api.Response{}, err
37+
}
38+
39+
return generate(ctx, llmArgs.Prompt, llmArgs.Config)
40+
}
41+
42+
func generate(ctx context.Context, prompt []api.Message, config GenerateTextConfig) (api.Response, error) {
43+
return config.Model.Generate(ctx, prompt, config.CallOptions)
44+
}
45+
46+
// GenerateTextConfig holds the settings for a single text-generation call:
// the language model to invoke and the call options forwarded to it.
type GenerateTextConfig struct {
	// CallOptions are the options passed through to the model's Generate call.
	CallOptions api.CallOptions
	// Model is the language model used to produce the response.
	Model api.LanguageModel
}
50+
51+
// GenerateOption is a function that modifies a GenerateTextConfig.
type GenerateOption func(*GenerateTextConfig)
53+
54+
// WithModel sets the language model to use for generation
55+
func WithModel(model api.LanguageModel) GenerateOption {
56+
return func(o *GenerateTextConfig) {
57+
o.Model = model
58+
}
59+
}
60+
61+
// WithMaxTokens specifies the maximum number of tokens to generate
62+
func WithMaxTokens(maxTokens int) GenerateOption {
63+
return func(o *GenerateTextConfig) {
64+
o.CallOptions.MaxTokens = maxTokens
65+
}
66+
}
67+
68+
// WithTemperature controls randomness in the model's output.
69+
// It is recommended to set either Temperature or TopP, but not both.
70+
func WithTemperature(temperature float64) GenerateOption {
71+
return func(o *GenerateTextConfig) {
72+
o.CallOptions.Temperature = &temperature
73+
}
74+
}
75+
76+
// WithStopSequences specifies sequences that will stop generation when produced.
77+
// Providers may have limits on the number of stop sequences.
78+
func WithStopSequences(stopSequences ...string) GenerateOption {
79+
return func(o *GenerateTextConfig) {
80+
o.CallOptions.StopSequences = stopSequences
81+
}
82+
}
83+
84+
// WithTopP controls nucleus sampling.
85+
// It is recommended to set either Temperature or TopP, but not both.
86+
func WithTopP(topP float64) GenerateOption {
87+
return func(o *GenerateTextConfig) {
88+
o.CallOptions.TopP = topP
89+
}
90+
}
91+
92+
// WithTopK limits sampling to the top K options for each token.
93+
// Used to remove "long tail" low probability responses.
94+
// Recommended for advanced use cases only.
95+
func WithTopK(topK int) GenerateOption {
96+
return func(o *GenerateTextConfig) {
97+
o.CallOptions.TopK = topK
98+
}
99+
}
100+
101+
// WithPresencePenalty affects the likelihood of the model repeating
102+
// information that is already in the prompt
103+
func WithPresencePenalty(penalty float64) GenerateOption {
104+
return func(o *GenerateTextConfig) {
105+
o.CallOptions.PresencePenalty = penalty
106+
}
107+
}
108+
109+
// WithFrequencyPenalty affects the likelihood of the model
110+
// repeatedly using the same words or phrases
111+
func WithFrequencyPenalty(penalty float64) GenerateOption {
112+
return func(o *GenerateTextConfig) {
113+
o.CallOptions.FrequencyPenalty = penalty
114+
}
115+
}
116+
117+
// WithResponseFormat specifies whether the output should be text or JSON.
118+
// For JSON output, a schema can optionally guide the model.
119+
func WithResponseFormat(format *api.ResponseFormat) GenerateOption {
120+
return func(o *GenerateTextConfig) {
121+
o.CallOptions.ResponseFormat = format
122+
}
123+
}
124+
125+
// WithSeed provides an integer seed for random sampling.
126+
// If supported by the model, calls will generate deterministic results.
127+
func WithSeed(seed int) GenerateOption {
128+
return func(o *GenerateTextConfig) {
129+
o.CallOptions.Seed = seed
130+
}
131+
}
132+
133+
// WithHeaders specifies additional HTTP headers to send with the request.
134+
// Only applicable for HTTP-based providers.
135+
func WithHeaders(headers map[string]string) GenerateOption {
136+
return func(o *GenerateTextConfig) {
137+
o.CallOptions.Headers = headers
138+
}
139+
}
140+
141+
// WithInputFormat specifies whether the user provided the input as messages or as a prompt.
142+
// This can help guide non-chat models in the expansion, as different expansions
143+
// may be needed for chat vs non-chat use cases.
144+
func WithInputFormat(format api.InputFormat) GenerateOption {
145+
return func(o *GenerateTextConfig) {
146+
o.CallOptions.InputFormat = format
147+
}
148+
}
149+
150+
// WithMode affects the behavior of the language model. It is required to
151+
// support provider-independent streaming and generation of structured objects.
152+
// The model can take this information and e.g. configure json mode, the correct
153+
// low level grammar, etc. It can also be used to optimize the efficiency of the
154+
// streaming, e.g. tool-delta stream parts are only needed in the
155+
// object-tool mode.
156+
//
157+
// Mode will be removed in v2, and at that point it will be deprecated.
158+
// All necessary settings will be directly supported through the call settings,
159+
// in particular responseFormat, toolChoice, and tools.
160+
func WithMode(mode api.ModeConfig) GenerateOption {
161+
return func(o *GenerateTextConfig) {
162+
o.CallOptions.Mode = mode
163+
}
164+
}
165+
166+
// WithProviderMetadata sets additional provider-specific metadata.
167+
// The metadata is passed through to the provider from the AI SDK and enables
168+
// provider-specific functionality that can be fully encapsulated in the provider.
169+
func WithProviderMetadata(providerName string, metadata any) GenerateOption {
170+
return func(o *GenerateTextConfig) {
171+
if o.CallOptions.ProviderMetadata == nil {
172+
o.CallOptions.ProviderMetadata = api.NewProviderMetadata(map[string]any{})
173+
}
174+
o.CallOptions.ProviderMetadata.Set(providerName, metadata)
175+
}
176+
}
177+
178+
// WithTools specifies the tools available to the model during generation.
179+
func WithTools(tools ...api.ToolDefinition) GenerateOption {
180+
return func(o *GenerateTextConfig) {
181+
o.CallOptions.Mode = api.RegularMode{Tools: tools}
182+
}
183+
}
184+
185+
// buildGenerateConfig combines multiple generate options into a single GenerateConfig struct.
186+
func buildGenerateConfig(opts []GenerateOption) GenerateTextConfig {
187+
config := GenerateTextConfig{
188+
CallOptions: api.CallOptions{
189+
InputFormat: "prompt", // default
190+
Mode: api.RegularMode{}, // default
191+
ProviderMetadata: api.NewProviderMetadata(map[string]any{}),
192+
},
193+
Model: DefaultLanguageModel(),
194+
}
195+
for _, opt := range opts {
196+
opt(&config)
197+
}
198+
return config
199+
}

0 commit comments

Comments
 (0)