
Commit 3475260

Authored by tim13246879, im-calvin, and mckaywrigley
Store API keys in env to enable preset keys (#1093)
* Add API keys to profile if they exist in env
* Revert unnecessary changes
* Added optional API Keys
* add disable to profile if environment variables are set
* add disables for Azure API Keys + profile
* add more modularity to openai organization user profile disable
* refactor retrieval/keys to use reuseable functions; update model selector to 'see' environment variables
* nits for build (types in types folder)
* handle ui for profile envs

---------

Co-authored-by: Kelvin Wong <[email protected]>
Co-authored-by: Kelvin Wong <[email protected]>
Co-authored-by: Mckay Wrigley <[email protected]>
1 parent a116ce0 commit 3475260

File tree

23 files changed, +474 -118 lines changed


.env.local.example (+17)

@@ -7,3 +7,20 @@ SUPABASE_SERVICE_ROLE_KEY=
 
 # Ollama
 NEXT_PUBLIC_OLLAMA_URL=http://localhost:11434
+
+# API Keys (Optional: Entering an API key here overrides the API keys globally for all users.)
+OPENAI_API_KEY=
+ANTHROPIC_API_KEY=
+GOOGLE_GEMINI_API_KEY=
+MISTRAL_API_KEY=
+PERPLEXITY_API_KEY=
+AZURE_OPENAI_API_KEY=
+
+NEXT_PUBLIC_OPENAI_ORGANIZATION_ID=
+
+# Azure API Information
+AZURE_OPENAI_API_KEY=
+NEXT_PUBLIC_AZURE_OPENAI_ENDPOINT=
+NEXT_PUBLIC_AZURE_GPT_35_TURBO_ID=
+NEXT_PUBLIC_AZURE_GPT_45_VISION_ID=
+NEXT_PUBLIC_AZURE_GPT_45_TURBO_ID=

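The new retrieval route further down imports isUsingEnvironmentKey from @/lib/envs, whose implementation is not shown in this diff. Since the key names above match the members of VALID_KEYS checked by that route, a minimal sketch could look like this (hypothetical; it assumes each key type names its environment variable directly):

// Hypothetical sketch of lib/envs.ts — not shown in this diff; assumes each
// key type (e.g. "OPENAI_API_KEY") is also the name of its env variable.
import { KeyTypeT } from "@/types/key-type"

export function isUsingEnvironmentKey(type: KeyTypeT): boolean {
  // A key counts as "preset" when the corresponding env var is non-empty.
  return Boolean(process.env[type])
}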
.husky/pre-commit (+2 -1)

@@ -1,4 +1,5 @@
 #!/usr/bin/env sh
+
 . "$(dirname -- "$0")/_/husky.sh"
 
-npm run lint:fix && npm run format:write && git add .
+npm run lint:fix && npm run format:write && git add .

app/api/chat/anthropic/route.ts (+1 -1)

@@ -1,5 +1,5 @@
 import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { ChatSettings } from "@/types"
 import Anthropic from "@anthropic-ai/sdk"
 import { AnthropicStream, StreamingTextResponse } from "ai"

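This one-line import change — @/lib/server-chat-helpers moving to @/lib/server/server-chat-helpers — repeats identically across the chat, command, and retrieval routes below. Per the commit message ("Add API keys to profile if they exist in env"), the relocated getServerProfile presumably merges preset env keys into the profile before checkApiKey runs. A hedged sketch of that merge step (the helper name and profile fields are assumptions, not code from this diff):

// Hypothetical helper inside lib/server/server-chat-helpers.ts — illustrative
// only; the real function and field names may differ.
function addApiKeysToProfile(profile: Record<string, any>) {
  if (process.env.OPENAI_API_KEY) {
    profile.openai_api_key = process.env.OPENAI_API_KEY
  }
  if (process.env.ANTHROPIC_API_KEY) {
    profile.anthropic_api_key = process.env.ANTHROPIC_API_KEY
  }
  // ...and likewise for the Google, Mistral, Perplexity, and Azure keys,
  // so a server-side env key overrides whatever the user saved.
  return profile
}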
app/api/chat/azure/route.ts (+1 -1)

@@ -1,5 +1,5 @@
 import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { ChatAPIPayload } from "@/types"
 import { OpenAIStream, StreamingTextResponse } from "ai"
 import OpenAI from "openai"

app/api/chat/google/route.ts (+1 -1)

@@ -1,4 +1,4 @@
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { ChatSettings } from "@/types"
 import { GoogleGenerativeAI } from "@google/generative-ai"
 
app/api/chat/mistral/route.ts (+1 -1)

@@ -1,4 +1,4 @@
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { ChatSettings } from "@/types"
 
 export const runtime = "edge"

app/api/chat/openai/route.ts (+1 -1)

@@ -1,5 +1,5 @@
 import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { ChatSettings } from "@/types"
 import { OpenAIStream, StreamingTextResponse } from "ai"
 import OpenAI from "openai"

app/api/chat/perplexity/route.ts (+1 -1)

@@ -1,4 +1,4 @@
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { ChatSettings } from "@/types"
 
 export const runtime = "edge"

app/api/command/route.ts (+1 -1)

@@ -1,5 +1,5 @@
 import { CHAT_SETTING_LIMITS } from "@/lib/chat-setting-limits"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import OpenAI from "openai"
 
 export const runtime = "edge"

app/api/retrieval/keys/route.ts (+23, new file)

@@ -0,0 +1,23 @@
+import { isUsingEnvironmentKey } from "@/lib/envs"
+import { createResponse } from "@/lib/server/server-utils"
+import { KeyTypeT } from "@/types/key-type"
+import { VALID_KEYS } from "@/types/valid-keys"
+
+export async function POST(request: Request) {
+  const json = await request.json()
+  const { key } = json as {
+    key: string
+  }
+
+  if (!key) {
+    return createResponse({ error: "Key type is required" }, 400)
+  }
+
+  if (!(key in VALID_KEYS)) {
+    return createResponse({ error: "Invalid key type" }, 400)
+  }
+
+  const isUsing = isUsingEnvironmentKey(key as KeyTypeT)
+
+  return createResponse({ isUsing }, 200)
+}

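With the route above in place, the client can ask the server whether a given key is preset. A minimal usage sketch — the request shape matches the route above, but the literal key value is an assumption about what VALID_KEYS contains, based on .env.local.example:

// Hypothetical client-side call to the new endpoint.
const response = await fetch("/api/retrieval/keys", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ key: "OPENAI_API_KEY" }) // assumed VALID_KEYS member
})
const { isUsing } = await response.json() // true when the server env presets this key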
app/api/retrieval/process/docx/route.ts (+1 -1)

@@ -1,6 +1,6 @@
 import { generateLocalEmbedding } from "@/lib/generate-local-embedding"
 import { processDocX } from "@/lib/retrieval/processing"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { Database } from "@/supabase/types"
 import { FileItemChunk } from "@/types"
 import { createClient } from "@supabase/supabase-js"

app/api/retrieval/process/route.ts (+1 -1)

@@ -6,7 +6,7 @@ import {
   processPdf,
   processTxt
 } from "@/lib/retrieval/processing"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { Database } from "@/supabase/types"
 import { FileItemChunk } from "@/types"
 import { createClient } from "@supabase/supabase-js"

app/api/retrieval/retrieve/route.ts (+1 -1)

@@ -1,5 +1,5 @@
 import { generateLocalEmbedding } from "@/lib/generate-local-embedding"
-import { checkApiKey, getServerProfile } from "@/lib/server-chat-helpers"
+import { checkApiKey, getServerProfile } from "@/lib/server/server-chat-helpers"
 import { Database } from "@/supabase/types"
 import { createClient } from "@supabase/supabase-js"
 import OpenAI from "openai"

components/models/model-option.tsx (+12 -3)

@@ -2,7 +2,7 @@ import { ChatbotUIContext } from "@/context/context"
 import { isModelLocked } from "@/lib/is-model-locked"
 import { LLM, LLMID } from "@/types"
 import { IconLock } from "@tabler/icons-react"
-import { FC, useContext } from "react"
+import { FC, useContext, useEffect, useState } from "react"
 import { WithTooltip } from "../ui/with-tooltip"
 import { ModelIcon } from "./model-icon"
 
@@ -13,10 +13,19 @@ interface ModelOptionProps {
 
 export const ModelOption: FC<ModelOptionProps> = ({ model, onSelect }) => {
   const { profile } = useContext(ChatbotUIContext)
+  const [isLocked, setIsLocked] = useState<Boolean>(true)
 
-  if (!profile) return null
+  useEffect(() => {
+    async function setup() {
+      if (!profile) return null
+      const locked = await isModelLocked(model.provider, profile)
+
+      setIsLocked(locked)
+    }
+    setup()
+  }, [model, profile])
 
-  const isLocked = isModelLocked(model.provider, profile)
+  if (!profile) return null
 
   const handleSelectModel = () => {
     if (isLocked) return

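Both model components now await isModelLocked(...) inside useEffect instead of calling it synchronously during render, which suggests the lock check has become asynchronous — plausibly because it consults the new /api/retrieval/keys endpoint to "see" environment variables, as the commit message puts it. A rough sketch under that assumption (the provider-to-key mapping and profile field names are hypothetical):

// Hypothetical sketch of lib/is-model-locked.ts after this commit — the
// mapping and field names are assumptions, not the repo's actual code.
const PROVIDER_TO_KEY: Record<string, string> = {
  openai: "OPENAI_API_KEY",
  anthropic: "ANTHROPIC_API_KEY",
  google: "GOOGLE_GEMINI_API_KEY",
  mistral: "MISTRAL_API_KEY",
  perplexity: "PERPLEXITY_API_KEY",
  azure: "AZURE_OPENAI_API_KEY"
}

export async function isModelLocked(
  provider: string,
  profile: Record<string, any>
): Promise<boolean> {
  const key = PROVIDER_TO_KEY[provider]
  if (!key) return false // e.g. local Ollama models need no key

  // Ask the server whether this key is preset in its environment.
  const response = await fetch("/api/retrieval/keys", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ key })
  })
  const { isUsing } = await response.json()

  // Locked only when neither the server env nor the user's profile has a key.
  return !isUsing && !profile[key.toLowerCase()]
}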
components/models/model-select.tsx (+13 -1)

@@ -37,6 +37,8 @@ export const ModelSelect: FC<ModelSelectProps> = ({
   const [search, setSearch] = useState("")
   const [tab, setTab] = useState<"hosted" | "local">("hosted")
 
+  const [isLocked, setIsLocked] = useState<Boolean>(true)
+
   useEffect(() => {
     if (isOpen) {
       setTimeout(() => {
@@ -45,6 +47,17 @@ export const ModelSelect: FC<ModelSelectProps> = ({
     }
   }, [isOpen])
 
+  useEffect(() => {
+    const checkModelLock = async () => {
+      if (SELECTED_MODEL && profile) {
+        const locked = await isModelLocked(SELECTED_MODEL.provider, profile)
+        setIsLocked(locked)
+      }
+    }
+
+    checkModelLock()
+  }, [profile])
+
   const handleSelectModel = (modelId: LLMID) => {
     onSelectModel(modelId)
     setIsOpen(false)
@@ -71,7 +84,6 @@ export const ModelSelect: FC<ModelSelectProps> = ({
   if (!SELECTED_MODEL) return null
   if (!profile) return null
 
-  const isLocked = isModelLocked(SELECTED_MODEL.provider, profile)
   const usingLocalModels = availableLocalModels.length > 0
 
   return (
