Skip to content

Commit ce4fa71

Browse files
committed
add MLX LM
1 parent d9dcbf2 commit ce4fa71

File tree

1 file changed

+44
-0
lines changed

1 file changed

+44
-0
lines changed

packages/tasks/src/local-apps.ts

+44
Original file line numberDiff line numberDiff line change
@@ -262,6 +262,43 @@ const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
262262
];
263263
};
264264

265+
const snippetMlxLm = (model: ModelData): LocalAppSnippet[] => {
266+
const openaiCurl = [
267+
"# Calling the OpenAI-compatible server with curl",
268+
`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
269+
` -H "Content-Type: application/json" \\`,
270+
` --data '{`,
271+
` "model": "${model.id}",`,
272+
` "messages": [`,
273+
` {"role": "user", "content": "Hello"}`,
274+
` ]`,
275+
` }'`,
276+
];
277+
278+
return [
279+
{
280+
title: "Generate or start a chat session",
281+
setup: ["# Install MLX LM", "pip install mlx-lm"].join("\n"),
282+
content: [
283+
"# One-shot generation",
284+
`mlx_lm.generate --model "${model.id}" --prompt "Hello"`,
285+
...(model.tags.includes("conversational")
286+
? ["# Interactive chat REPL", `mlx_lm.chat --model "${model.id}"`]
287+
: []),
288+
].join("\n"),
289+
},
290+
...(model.tags.includes("conversational")
291+
? [
292+
{
293+
title: "Run an OpenAI-compatible server",
294+
setup: ["# Install MLX LM", "pip install mlx-lm"].join("\n"),
295+
content: ["# Start the server", `mlx_lm.server --model "${model.id}"`, ...openaiCurl].join("\n"),
296+
},
297+
]
298+
: []),
299+
];
300+
};
301+
265302
/**
266303
* Add your new local app here.
267304
*
@@ -302,6 +339,13 @@ export const LOCAL_APPS = {
302339
(model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
303340
snippet: snippetVllm,
304341
},
342+
// Registry entry for Apple's MLX LM local app (https://github.com/ml-explore/mlx-lm).
"mlx-lm": {
	// Human-readable name shown on the model page's local-apps list.
	prettyLabel: "MLX LM",
	docsUrl: "https://github.com/ml-explore/mlx-lm",
	mainTask: "text-generation",
	// Gate visibility to MLX-format models. NOTE(review): `isMlxModel` is defined
	// elsewhere in this file — presumably checks the model's library/format tags; confirm.
	displayOnModelPage: isMlxModel,
	// Builds the generate/chat and OpenAI-compatible-server snippets for this model.
	snippet: snippetMlxLm,
},
305349
tgi: {
306350
prettyLabel: "TGI",
307351
docsUrl: "https://huggingface.co/docs/text-generation-inference/",

0 commit comments

Comments
 (0)