diff --git a/extensions/vscode/config_schema.json b/extensions/vscode/config_schema.json
index 48f6e2aaad..a9e20f2bb3 100644
--- a/extensions/vscode/config_schema.json
+++ b/extensions/vscode/config_schema.json
@@ -959,7 +959,8 @@
             "gemini-2.0-flash-lite-preview-02-05",
             "gemini-2.0-flash-lite",
             "gemini-2.0-flash-exp-image-generation",
-            "gemini-2.5-pro-exp-03-25"
+            "gemini-2.5-pro-exp-03-25",
+            "gemini-2.5-pro-latest"
           ]
         }
       }
@@ -1565,6 +1566,7 @@
         "gemini-pro",
         "gemini-1.5-pro-latest",
         "gemini-1.5-pro",
+        "gemini-2.5-pro",
         "gemini-1.5-flash-latest",
         "gemini-1.5-flash",
         "mistral-tiny",
diff --git a/gui/src/pages/AddNewModel/configs/models.ts b/gui/src/pages/AddNewModel/configs/models.ts
index d21a7d66f9..7fbd97e4dc 100644
--- a/gui/src/pages/AddNewModel/configs/models.ts
+++ b/gui/src/pages/AddNewModel/configs/models.ts
@@ -929,6 +929,20 @@ export const models: { [key: string]: ModelPackage } = {
     providerOptions: ["gemini"],
     isOpenSource: false,
   },
+  gemini25Pro: {
+    title: "Gemini 2.5 Pro",
+    description:
+      "Google's thinking by default Pro model with up to 64k output context. Best for complex tasks involving reasoning.",
+    params: {
+      title: "Gemini 2.5 Pro",
+      model: "gemini-2.5-pro",
+      contextLength: 1_048_576,
+      apiKey: "",
+    },
+    icon: "gemini.png",
+    providerOptions: ["gemini"],
+    isOpenSource: false,
+  },
   commandR: {
     title: "Command R",
     description: