Commit 5290f6a

Fix continuedev#4723 / continuedev#4774: Add error handling for system prompt

Ensure that Claude models on VertexAI send correct prompts, even if the systemMessage is undefined.

Signed-off-by: Michael Elder (@mdelder)

Parent commit: 5868fb8

3 files changed (+150 -20 lines)

core/llm/llms/Gemini.ts (+63 -12)
@@ -4,6 +4,7 @@ import {
   CompletionOptions,
   LLMOptions,
   MessagePart,
+  TextMessagePart,
   ToolCallDelta,
 } from "../../index.js";
 import { findLast } from "../../util/findLast.js";
@@ -69,21 +70,71 @@ class Gemini extends BaseLLM {
     }
   }
 
-  public removeSystemMessage(messages: ChatMessage[]): ChatMessage[] {
-    // should be public for use within VertexAI
-    const msgs = [...messages];
-
-    if (msgs[0]?.role === "system") {
-      const sysMsg = msgs.shift()?.content;
-      // @ts-ignore
-      if (msgs[0]?.role === "user") {
-        // @ts-ignore
-        msgs[0].content = `System message - follow these instructions in every response: ${sysMsg}\n\n---\n\n${msgs[0].content}`;
+  /**
+   * Removes the system message and merges it with the next user message if present.
+   * @param messages Array of chat messages
+   * @returns Modified array with system message merged into user message if applicable
+   */
+  public removeSystemMessage(messages: ChatMessage[]): ChatMessage[] {
+    // If no messages or the first message isn't system, return a copy of the original messages
+    if (messages.length === 0 || messages[0]?.role !== "system") {
+      return [...messages];
+    }
+
+    // Extract the system message
+    const systemMessage: ChatMessage = messages[0];
+
+    // Extract system content based on its type
+    let systemContent = "";
+    if (typeof systemMessage.content === "string") {
+      systemContent = systemMessage.content;
+    } else if (Array.isArray(systemMessage.content)) {
+      const contentArray: Array<MessagePart> = systemMessage.content as Array<MessagePart>;
+      const concatenatedText = contentArray
+        .filter((part) => part.type === "text")
+        .map((part) => part.text)
+        .join(" ");
+      systemContent = concatenatedText ? concatenatedText : "";
+    } else if (systemMessage.content && typeof systemMessage.content === "object") {
+      const typedContent = systemMessage.content as TextMessagePart;
+      systemContent = typedContent?.text || "";
+    }
+
+    // Create a new array without the system message
+    const remainingMessages: ChatMessage[] = messages.slice(1);
+
+    // Check if there's a user message to merge with
+    if (remainingMessages.length > 0 && remainingMessages[0].role === "user") {
+      const userMessage: ChatMessage = remainingMessages[0];
+      const prefix = `System message - follow these instructions in every response: ${systemContent}\n\n---\n\n`;
+
+      // Merge based on the user content type
+      if (typeof userMessage.content === "string") {
+        userMessage.content = prefix + userMessage.content;
+      } else if (Array.isArray(userMessage.content)) {
+        const contentArray: Array<MessagePart> = userMessage.content as Array<MessagePart>;
+        const textPart = contentArray.find((part) => part.type === "text") as TextMessagePart | undefined;
+
+        if (textPart) {
+          textPart.text = prefix + textPart.text;
+        } else {
+          userMessage.content.push({
+            type: "text",
+            text: prefix,
+          } as TextMessagePart);
         }
+      } else if (userMessage.content && typeof userMessage.content === "object") {
+        const typedContent = userMessage.content as TextMessagePart;
+        userMessage.content = [{
+          type: "text",
+          text: prefix + (typedContent.text || ""),
+        } as TextMessagePart];
       }
-
-    return msgs;
     }
+
+    return remainingMessages;
+  }
 
   protected async *_streamChat(
     messages: ChatMessage[],
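For illustration, here is roughly how the rewritten method behaves. This is a minimal sketch, not code from the commit: the constructor options are abbreviated, and the message literals use the simple string-content case.

import Gemini from "./Gemini.js";

const llm = new Gemini({ model: "gemini-1.5-pro" });

// Leading system message: merged into the first user message.
llm.removeSystemMessage([
  { role: "system", content: "Answer in French." },
  { role: "user", content: "Hello" },
]);
// => [{ role: "user", content:
//   "System message - follow these instructions in every response: Answer in French.\n\n---\n\nHello" }]

// No leading system message (the undefined-system-prompt case the commit message
// describes): the new guard clause returns a shallow copy immediately.
llm.removeSystemMessage([{ role: "user", content: "Hello" }]);
// => [{ role: "user", content: "Hello" }]
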
core/llm/llms/VertexAI.ts (+78 -6)

@@ -82,6 +82,15 @@ class VertexAI extends BaseLLM {
 
     // Remove the `model` property and add `anthropic_version`
     const { model, ...finalOptions } = convertedArgs;
+
+    // Add tool support - convert tools to Anthropic's format
+    if (options.tools) {
+      finalOptions.tools = options.tools.map(tool => ({
+        name: tool.function.name,
+        description: tool.function?.description,
+        input_schema: tool.function?.parameters
+      }));
+    }
     return {
       ...finalOptions,
       anthropic_version: "vertex-2023-10-16",
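
For context, the mapping above converts OpenAI-style tool definitions (as stored in options.tools) into Anthropic's tool schema. A before/after sketch, using an invented weather tool for illustration:

// OpenAI-style definition, as found in options.tools:
const openAiTool = {
  type: "function",
  function: {
    name: "get_weather",
    description: "Look up the current weather",
    parameters: {
      type: "object",
      properties: { city: { type: "string" } },
      required: ["city"],
    },
  },
};

// Anthropic-style result of the map() above:
const anthropicTool = {
  name: "get_weather",
  description: "Look up the current weather",
  input_schema: {
    type: "object",
    properties: { city: { type: "string" } },
    required: ["city"],
  },
};
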
@@ -123,25 +132,88 @@ class VertexAI extends BaseLLM {
             },
           ]
         : systemMessage,
+        // Add tools if they exist in options
+        ...(options.tools ? {
+          tools: options.tools.map(tool => ({
+            name: tool.function.name,
+            description: tool.function.description,
+            input_schema: tool.function.parameters
+          }))
+        } : {})
       }),
     });
 
     if (options.stream === false) {
       const data = await response.json();
-      yield { role: "assistant", content: data.content[0].text };
+
+      // Check if the response contains a tool call
+      if (data.content && data.content.length > 0) {
+        const contentItem = data.content[0];
+
+        if (contentItem.type === "tool_use") {
+          // Handle tool calls in non-streaming mode
+          yield {
+            role: "assistant",
+            content: "",
+            toolCalls: [{
+              id: contentItem.id || `call_${Date.now()}`,
+              type: "function",
+              function: {
+                name: contentItem.name,
+                arguments: JSON.stringify(contentItem.input)
+              }
+            }]
+          };
+        } else {
+          // Regular text response
+          yield { role: "assistant", content: contentItem.text || "" };
+        }
+      }
       return;
     }
 
+    // For streaming responses
+    let currentToolCall = null;
+
     for await (const value of streamSse(response)) {
-      if (value.type === "message_start") {
-        console.log(value);
-      }
-      if (value.delta?.text) {
+      if (value.type === "content_block_start") {
+        if (value.content_block?.type === "tool_use") {
+          // Initialize a new tool call
+          currentToolCall = {
+            id: value.content_block.id || `call_${Date.now()}`,
+            name: value.content_block.name,
+            input: {}
+          };
+        }
+      } else if (value.type === "content_block_delta" && currentToolCall) {
+        // Update the tool call with new input data
+        if (value.delta?.input) {
+          currentToolCall.input = {
+            ...currentToolCall.input,
+            ...value.delta.input
+          };
+        }
+      } else if (value.type === "content_block_stop" && currentToolCall) {
+        // Finalize and yield the complete tool call
+        yield {
+          role: "assistant",
+          content: "",
+          toolCalls: [{
+            id: currentToolCall.id,
+            type: "function",
+            function: {
+              name: currentToolCall.name,
+              arguments: JSON.stringify(currentToolCall.input)
+            }
+          }]
+        };
+        currentToolCall = null;
+      } else if (value.delta?.text) {
+        // Regular text response
        yield { role: "assistant", content: value.delta.text };
       }
     }
   }
-
   //Gemini
 
   private async *streamChatGemini(
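The streaming branch above is a small state machine over SSE events: a content_block_start carrying a tool_use block opens currentToolCall, each content_block_delta whose delta has an input object is shallow-merged into it, and content_block_stop yields the finished call. A sketch of the event flow, reduced to just the fields this handler reads (the id and input values are invented for illustration):

const events = [
  { type: "content_block_start", content_block: { type: "tool_use", id: "call_abc", name: "get_weather" } },
  { type: "content_block_delta", delta: { input: { city: "Paris" } } },
  { type: "content_block_stop" },
];

// After consuming these three events, the loop yields a single assistant message:
// {
//   role: "assistant",
//   content: "",
//   toolCalls: [{
//     id: "call_abc",
//     type: "function",
//     function: { name: "get_weather", arguments: "{\"city\":\"Paris\"}" }
//   }]
// }
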
core/llm/toolSupport.ts (+9 -2)

@@ -58,8 +58,15 @@ export const PROVIDER_TOOL_SUPPORT: Record<
     return model.toLowerCase().includes("gemini");
   },
   vertexai: (model) => {
-    // All gemini models except flash 2.0 lite support function calling
-    return model.toLowerCase().includes("gemini") && !model.toLowerCase().includes("lite");;
+    return [
+      "claude-3-5",
+      "claude-3.5",
+      "claude-3-7",
+      "claude-3.7",
+      "gemini",
+    ].some((part) => model.toLowerCase().startsWith(part)) ||
+      // All gemini models except flash 2.0 lite support function calling
+      (model.toLowerCase().includes("gemini") && !model.toLowerCase().includes("lite"));
   },
   bedrock: (model) => {
     // For Bedrock, only support Claude Sonnet models with versions 3.5/3-5 and 3.7/3-7
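Spelled out, the new vertexai predicate accepts any model ID that starts with one of the listed prefixes, and otherwise falls back to the old Gemini rule. The model IDs below are illustrative:

PROVIDER_TOOL_SUPPORT["vertexai"]("claude-3-5-sonnet-v2");  // true: starts with "claude-3-5"
PROVIDER_TOOL_SUPPORT["vertexai"]("claude-3.7-sonnet");     // true: starts with "claude-3.7"
PROVIDER_TOOL_SUPPORT["vertexai"]("gemini-1.5-pro");        // true: starts with "gemini"
PROVIDER_TOOL_SUPPORT["vertexai"]("gemini-2.0-flash-lite"); // also true: it starts with "gemini",
// so the "lite" exclusion in the fallback clause is only reachable for IDs that
// contain "gemini" without starting with it (e.g. "models/gemini-2.0-flash-lite").
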