
Commit 2f8d490

first take relace provider
1 parent 9889b46 commit 2f8d490

11 files changed: +184 additions, -66 deletions

core/llm/autodetect.ts

Lines changed: 1 addition & 0 deletions
@@ -58,6 +58,7 @@ const PROVIDER_HANDLES_TEMPLATING: string[] = [
   "vertexai",
   "watsonx",
   "nebius",
+  "relace",
 ];

 const PROVIDER_SUPPORTS_IMAGES: string[] = [
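As far as the surrounding code suggests, membership in PROVIDER_HANDLES_TEMPLATING tells core that the provider applies its own prompt templating server-side, so no local chat template should be applied. A purely illustrative check, not the actual consumer code in autodetect.ts:

// Illustrative only: the autodetect helpers consult this list before applying
// a local chat template to the messages.
const handlesTemplating = PROVIDER_HANDLES_TEMPLATING.includes("relace"); // true after this change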

core/llm/llms/Relace.ts

Lines changed: 12 additions & 0 deletions
@@ -0,0 +1,12 @@
+import { LLMOptions } from "../..";
+import { LlmApiRequestType } from "../openaiTypeConverters";
+
+import OpenAI from "./OpenAI";
+
+export class Relace extends OpenAI {
+  static providerName = "relace";
+  static defaultOptions: Partial<LLMOptions> | undefined = {
+    apiBase: "https://instantapply.endpoint.relace.run/v1/code/apply",
+  };
+  protected useOpenAIAdapterFor: (LlmApiRequestType | "*")[] = ["*"];
+}
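Because Relace extends OpenAI and sets useOpenAIAdapterFor to ["*"], every request type is delegated to the openai-adapters package rather than the built-in OpenAI request paths. A hedged sketch of direct construction; the import path and the model id are placeholders, not values defined by this commit:

import { Relace } from "core/llm/llms/Relace";

// Sketch only: LLMOptions accepts more fields; these are the minimum that
// seem needed here. "auto" is a placeholder model id, not from this commit.
const relace = new Relace({
  model: "auto",
  apiKey: process.env.RELACE_API_KEY ?? "",
});
// apiBase falls back to the defaultOptions value,
// https://instantapply.endpoint.relace.run/v1/code/apply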

core/llm/llms/index.ts

Lines changed: 4 additions & 2 deletions
@@ -19,7 +19,6 @@ import Cohere from "./Cohere";
 import DeepInfra from "./DeepInfra";
 import Deepseek from "./Deepseek";
 import Fireworks from "./Fireworks";
-import NCompass from "./NCompass";
 import Flowise from "./Flowise";
 import FreeTrial from "./FreeTrial";
 import FunctionNetwork from "./FunctionNetwork";
@@ -35,11 +34,14 @@ import Mistral from "./Mistral";
 import MockLLM from "./Mock";
 import Moonshot from "./Moonshot";
 import Msty from "./Msty";
+import NCompass from "./NCompass";
 import Nebius from "./Nebius";
+import Novita from "./Novita";
 import Nvidia from "./Nvidia";
 import Ollama from "./Ollama";
 import OpenAI from "./OpenAI";
 import OpenRouter from "./OpenRouter";
+import { Relace } from "./Relace";
 import Replicate from "./Replicate";
 import SageMaker from "./SageMaker";
 import SambaNova from "./SambaNova";
@@ -49,7 +51,6 @@ import ContinueProxy from "./stubs/ContinueProxy";
 import TestLLM from "./Test";
 import TextGenWebUI from "./TextGenWebUI";
 import Together from "./Together";
-import Novita from "./Novita";
 import VertexAI from "./VertexAI";
 import Vllm from "./Vllm";
 import WatsonX from "./WatsonX";
@@ -102,6 +103,7 @@ export const LLMClasses = [
   xAI,
   SiliconFlow,
   Scaleway,
+  Relace,
 ];

 export async function llmFromDescription(
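Registration in LLMClasses is what lets a config provider string resolve to this class; the lookup goes through the static providerName. A hedged sketch of that resolution, not the exact body of llmFromDescription:

// Sketch: resolve a provider string to its LLM class via the registry above.
const classForProvider = (provider: string) =>
  LLMClasses.find((cls) => cls.providerName === provider);

classForProvider("relace"); // now returns the Relace class added in this commit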

core/package-lock.json

Lines changed: 25 additions & 56 deletions
Some generated files are not rendered by default.

core/package.json

Lines changed: 1 addition & 1 deletion
@@ -49,7 +49,7 @@
     "@continuedev/config-yaml": "^1.0.63",
     "@continuedev/fetch": "^1.0.4",
     "@continuedev/llm-info": "^1.0.2",
-    "@continuedev/openai-adapters": "^1.0.10",
+    "@continuedev/openai-adapters": "file:../packages/openai-adapters",
     "@modelcontextprotocol/sdk": "^1.5.0",
     "@mozilla/readability": "^0.5.0",
     "@octokit/rest": "^20.1.1",

extensions/vscode/config_schema.json

Lines changed: 4 additions & 2 deletions
@@ -215,7 +215,8 @@
         "moonshot",
         "siliconflow",
         "function-network",
-        "scaleway"
+        "scaleway",
+        "relace"
       ],
       "markdownEnumDescriptions": [
         "### OpenAI\nUse gpt-4, gpt-3.5-turbo, or any other OpenAI model. See [here](https://openai.com/product#made-for-developers) to obtain an API key.\n\n> [Reference](https://docs.continue.dev/reference/Model%20Providers/openai)",
@@ -261,7 +262,8 @@
         "### Moonshot\nTo get started with Moonshot AI, obtain your API key from [Moonshot AI](https://platform.moonshot.cn/). Moonshot AI provides high-quality large language models with competitive pricing.\n> [Reference](https://platform.moonshot.cn/docs/api)",
         "### SiliconFlow\nTo get started with SiliconFlow, obtain your API key from [SiliconCloud](https://cloud.siliconflow.cn/account/ak). SiliconCloud provides cost-effective GenAI services based on excellent open source basic models.\n> [Models](https://siliconflow.cn/zh-cn/models)",
         "### Function Network offers private, affordable user-owned AI\nTo get started with Function Network, obtain your API key from [Function Network](https://www.function.network/join-waitlist). Function Network provides a variety of models for chat, completion, and embeddings.",
-        "### Scaleway\n Generative APIs are serverless endpoints for the most popular AI models.\nHosted in European data centers and priced competitively per million tokens used, models served by Scaleway are ideal for users requiring low latency, full data privacy, and 100% compliance with EU AI Act. To get access to the Scaleway Generative APIs, read the [Quickstart guide](https://www.scaleway.com/en/docs/ai-data/generative-apis/quickstart/) and get a [valid API key](https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/)."
+        "### Scaleway\n Generative APIs are serverless endpoints for the most popular AI models.\nHosted in European data centers and priced competitively per million tokens used, models served by Scaleway are ideal for users requiring low latency, full data privacy, and 100% compliance with EU AI Act. To get access to the Scaleway Generative APIs, read the [Quickstart guide](https://www.scaleway.com/en/docs/ai-data/generative-apis/quickstart/) and get a [valid API key](https://www.scaleway.com/en/docs/identity-and-access-management/iam/how-to/create-api-keys/).",
+        "### Relace\n Relace provides a fast apply model. To get started, obtain an API key from [here](https://app.relace.ai/settings/api-keys)."
       ],
       "type": "string"
     },
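With the enum extended, a config.json model entry can select the new provider. The shape below is a hedged sketch written as a TypeScript literal; the title and model id are placeholders, not values defined by this commit:

// Sketch of a config.json "models" entry using the new enum value.
const relaceModelEntry = {
  title: "Relace Instant Apply", // placeholder title
  provider: "relace",
  model: "instant-apply", // assumed model id; check Relace's docs
  apiKey: "<RELACE_API_KEY>",
};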

extensions/vscode/package-lock.json

Lines changed: 4 additions & 4 deletions
Some generated files are not rendered by default.

gui/package-lock.json

Lines changed: 1 addition & 1 deletion
Some generated files are not rendered by default.
packages/openai-adapters/src/apis/Relace.ts

Lines changed: 128 additions & 0 deletions
@@ -0,0 +1,128 @@
+import {
+  Completion,
+  CompletionCreateParamsNonStreaming,
+  CompletionCreateParamsStreaming,
+} from "openai/resources/completions.mjs";
+import {
+  CreateEmbeddingResponse,
+  EmbeddingCreateParams,
+} from "openai/resources/embeddings.mjs";
+import {
+  ChatCompletion,
+  ChatCompletionChunk,
+  ChatCompletionCreateParamsNonStreaming,
+  ChatCompletionCreateParamsStreaming,
+} from "openai/resources/index.mjs";
+import { Model } from "openai/resources/models.mjs";
+import { z } from "zod";
+import { OpenAIConfigSchema } from "../types.js";
+import { chatChunk, customFetch } from "../util.js";
+import {
+  BaseLlmApi,
+  CreateRerankResponse,
+  FimCreateParamsStreaming,
+  RerankCreateParams,
+} from "./base.js";
+
+// Relace only supports apply through a /v1/apply endpoint
+export class RelaceApi implements BaseLlmApi {
+  private apiBase = "https://instantapply.endpoint.relace.run/v1/code/apply";
+
+  constructor(private readonly config: z.infer<typeof OpenAIConfigSchema>) {
+    this.apiBase = config.apiBase ?? this.apiBase;
+    this.config = config;
+  }
+
+  chatCompletionNonStream(
+    body: ChatCompletionCreateParamsNonStreaming,
+    signal: AbortSignal,
+  ): Promise<ChatCompletion> {
+    throw new Error(
+      "Relace provider does not support non-streaming chat completion.",
+    );
+  }
+
+  // We convert from what would be sent to OpenAI (a prediction for the existing code and a user message with the new code)
+  // to Relace's format
+  async *chatCompletionStream(
+    body: ChatCompletionCreateParamsStreaming,
+    signal: AbortSignal,
+  ): AsyncGenerator<ChatCompletionChunk> {
+    const fetch = customFetch(this.config.requestOptions);
+    const headers = {
+      "Content-Type": "application/json",
+      Authorization: `Bearer ${this.config.apiKey}`,
+    };
+
+    const prediction = body.prediction?.content ?? "";
+    const initialCode =
+      typeof prediction === "string"
+        ? prediction
+        : prediction.map((p) => p.text).join("");
+
+    const userContent = body.messages.find((m) => m.role === "user")?.content;
+    if (!userContent) {
+      throw new Error("No edit snippet provided.");
+    }
+
+    const editSnippet =
+      typeof userContent === "string"
+        ? userContent
+        : userContent
+            .filter((p) => p.type === "text")
+            .map((p) => p.text)
+            .join("");
+
+    const data = {
+      initialCode,
+      editSnippet,
+    };
+
+    const response = await fetch(this.apiBase, {
+      method: "POST",
+      headers,
+      body: JSON.stringify(data),
+      signal,
+    });
+
+    const result = (await response.json()) as any;
+    const mergedCode = result.mergedCode;
+
+    yield chatChunk({
+      content: mergedCode,
+      model: body.model,
+    });
+  }
+
+  completionNonStream(
+    body: CompletionCreateParamsNonStreaming,
+    signal: AbortSignal,
+  ): Promise<Completion> {
+    throw new Error(
+      "Relace provider does not support non-streaming completion.",
+    );
+  }
+  completionStream(
+    body: CompletionCreateParamsStreaming,
+    signal: AbortSignal,
+  ): AsyncGenerator<Completion> {
+    throw new Error("Relace provider does not support streaming completion.");
+  }
+  fimStream(
+    body: FimCreateParamsStreaming,
+    signal: AbortSignal,
+  ): AsyncGenerator<ChatCompletionChunk> {
+    throw new Error(
+      "Relace provider does not support streaming FIM completion.",
+    );
+  }
+  embed(body: EmbeddingCreateParams): Promise<CreateEmbeddingResponse> {
+    throw new Error("Relace provider does not support embeddings.");
+  }
+  rerank(body: RerankCreateParams): Promise<CreateRerankResponse> {
+    throw new Error("Relace provider does not support reranking.");
+  }
+  list(): Promise<Model[]> {
+    throw new Error("Relace provider does not support model listing.");
+  }
+}
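A hedged usage sketch of the adapter's apply flow (not part of the commit): the current file contents ride in the OpenAI prediction field, the edit snippet rides as the user message, and RelaceApi turns them into the { initialCode, editSnippet } payload, yielding a single chunk with the merged code. The model id, snippet values, and the config cast are placeholders and assumptions:

import { RelaceApi } from "./apis/Relace.js";

// Placeholder inputs for illustration.
const originalFileContents = "function add(a, b) {\n  return a + b;\n}\n";
const editSnippet = "function add(a: number, b: number): number { ... }";

// Config fields beyond apiKey are assumptions about OpenAIConfigSchema.
const api = new RelaceApi({ provider: "relace", apiKey: "<RELACE_API_KEY>" } as any);

const stream = api.chatCompletionStream(
  {
    model: "instant-apply", // placeholder
    stream: true,
    prediction: { type: "content", content: originalFileContents },
    messages: [{ role: "user", content: editSnippet }],
  },
  new AbortController().signal,
);

for await (const chunk of stream) {
  // One chunk is yielded, carrying mergedCode as its delta content.
  console.log(chunk.choices[0]?.delta?.content);
}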

packages/openai-adapters/src/index.ts

Lines changed: 3 additions & 0 deletions
@@ -10,6 +10,7 @@ import { JinaApi } from "./apis/Jina.js";
 import { MockApi } from "./apis/Mock.js";
 import { MoonshotApi } from "./apis/Moonshot.js";
 import { OpenAIApi } from "./apis/OpenAI.js";
+import { RelaceApi } from "./apis/Relace.js";
 import { LLMConfig, OpenAIConfigSchema } from "./types.js";

 dotenv.config();
@@ -42,6 +43,8 @@ export function constructLlmApi(config: LLMConfig): BaseLlmApi | undefined {
       return new DeepSeekApi(config);
     case "moonshot":
       return new MoonshotApi(config);
+    case "relace":
+      return new RelaceApi(config);
     case "x-ai":
       return openAICompatible("https://api.x.ai/v1/", config);
     case "voyage":
