@@ -262,6 +262,43 @@ const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
 	];
 };
 
+const snippetMlxLm = (model: ModelData): LocalAppSnippet[] => {
+	const openaiCurl = [
+		"# Calling the OpenAI-compatible server with curl",
+		`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+		`  -H "Content-Type: application/json" \\`,
+		`  --data '{`,
+		`    "model": "${model.id}",`,
+		`    "messages": [`,
+		`      {"role": "user", "content": "Hello"}`,
+		`    ]`,
+		`  }'`,
+	];
+
+	return [
+		{
+			title: "Generate or start a chat session",
+			setup: ["# Install MLX LM", "pip install mlx-lm"].join("\n"),
+			content: [
+				"# One-shot generation",
+				`mlx_lm.generate --model "${model.id}" --prompt "Hello"`,
+				...(model.tags.includes("conversational")
+					? ["# Interactive chat REPL", `mlx_lm.chat --model "${model.id}"`]
+					: []),
+			].join("\n"),
+		},
+		...(model.tags.includes("conversational")
+			? [
+					{
+						title: "Run an OpenAI-compatible server",
+						setup: ["# Install MLX LM", "pip install mlx-lm"].join("\n"),
+						content: ["# Start the server", `mlx_lm.server --model "${model.id}"`, ...openaiCurl].join("\n"),
+					},
+				]
+			: []),
+	];
+};
+
 /**
  * Add your new local app here.
  *
@@ -302,6 +339,13 @@ export const LOCAL_APPS = {
 			(model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
 		snippet: snippetVllm,
 	},
+	"mlx-lm": {
+		prettyLabel: "MLX LM",
+		docsUrl: "https://github.com/ml-explore/mlx-lm",
+		mainTask: "text-generation",
+		displayOnModelPage: isMlxModel,
+		snippet: snippetMlxLm,
+	},
 	tgi: {
 		prettyLabel: "TGI",
 		docsUrl: "https://huggingface.co/docs/text-generation-inference/",
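
Note: the new entry's `displayOnModelPage: isMlxModel` references a predicate that sits outside this hunk. A minimal sketch of what such a filter might look like, assuming MLX-converted checkpoints carry an `mlx` tag in `ModelData.tags`:

```ts
// Hypothetical sketch — not part of this diff; the real isMlxModel is
// assumed to be defined elsewhere in local-apps.ts.
const isMlxModel = (model: ModelData): boolean => model.tags.includes("mlx");
```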
0 commit comments
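
To see what `snippetMlxLm` actually emits, here is a hedged usage sketch with a mock model (the model id is a placeholder, not a real checkpoint):

```ts
// Partial mock of ModelData — only the fields snippetMlxLm reads.
const model = { id: "someuser/some-mlx-model", tags: ["mlx", "conversational"] } as ModelData;

for (const { title, content } of snippetMlxLm(model)) {
	console.log(`## ${title}\n${content}\n`);
}
// Because the "conversational" tag is present, this prints two cards:
// the one-shot generation + chat REPL commands, and the
// OpenAI-compatible server command followed by the curl example.
```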