@@ -27,9 +27,7 @@ aiohttp==3.11.18
     #   sglang
     #   vllm
 aiosignal==1.3.2
-    # via
-    #   aiohttp
-    #   ray
+    # via aiohttp
 airportsdata==20250224
     # via outlines
 aliyun-python-sdk-core==2.16.0
@@ -42,7 +40,7 @@ altair==5.5.0
     # via streamlit
 annotated-types==0.7.0
     # via pydantic
-anthropic==0.50.0
+anthropic==0.51.0
     # via sglang
 antlr4-python3-runtime==4.9.3
     # via
@@ -81,6 +79,8 @@ blake3==1.0.4
     # via vllm
 blinker==1.9.0
     # via streamlit
+blobfile==3.0.0
+    # via sglang
 cachetools==5.5.2
     # via
     #   evalscope
@@ -96,7 +96,7 @@ cffi==1.17.1
     # via
     #   cryptography
     #   soundfile
-charset-normalizer==3.4.1
+charset-normalizer==3.4.2
     # via requests
 click==8.1.8
     # via
@@ -125,13 +125,13 @@ contourpy==1.3.2
     # via matplotlib
 crcmod==1.7
     # via oss2
-cryptography==44.0.2
+cryptography==44.0.3
     # via aliyun-python-sdk-core
 ctranslate2==4.6.0
     # via infinity-emb
-cuda-bindings==12.8.0
+cuda-bindings==12.9.0
     # via cuda-python
-cuda-python==12.8.0
+cuda-python==12.9.0
     # via sglang
 cupy-cuda12x==13.4.1
     # via ray
@@ -175,7 +175,7 @@ distro==1.9.0
     #   posthog
 dnspython==2.7.0
     # via email-validator
-edge-tts==7.0.1
+edge-tts==7.0.2
     # via gpt-server (pyproject.toml)
 editdistance==0.8.1
     # via
@@ -222,6 +222,7 @@ ffmpy==0.5.0
     # via gpt-server (pyproject.toml)
 filelock==3.18.0
     # via
+    #   blobfile
     #   datasets
     #   huggingface-hub
     #   ray
@@ -230,11 +231,11 @@ filelock==3.18.0
     #   vllm
 fire==0.7.0
     # via lmdeploy
-flashinfer-python==0.2.3+cu124torch2.5
+flashinfer-python==0.2.5+cu124torch2.5
     # via
     #   gpt-server (pyproject.toml)
     #   sglang
-flashtts==0.1.4
+flashtts==0.1.5
     # via gpt-server (pyproject.toml)
 flatbuffers==25.2.10
     # via onnxruntime
@@ -246,7 +247,6 @@ frozenlist==1.6.0
     # via
     #   aiohttp
     #   aiosignal
-    #   ray
 fschat==0.2.36
     # via gpt-server (pyproject.toml)
 fsspec==2024.6.1
@@ -259,7 +259,7 @@ funasr==1.2.6
     # via gpt-server (pyproject.toml)
 future==1.0.0
     # via pyloudnorm
-gguf==0.16.2
+gguf==0.16.3
     # via vllm
 gitdb==4.0.12
     # via gitpython
@@ -279,7 +279,7 @@ hf-transfer==0.1.9
     # via
     #   infinity-emb
     #   sglang
-hf-xet==1.0.5
+hf-xet==1.1.0
     # via huggingface-hub
 httpcore==1.0.9
     # via httpx
@@ -293,7 +293,7 @@ httpx==0.27.2
     #   fschat
     #   litellm
     #   openai
-huggingface-hub==0.30.2
+huggingface-hub==0.31.1
     # via
     #   accelerate
     #   datasets
@@ -326,7 +326,7 @@ importlib-metadata==8.0.0
     #   vllm
 importlib-resources==6.5.2
     # via wetextprocessing
-infinity-emb==0.0.73
+infinity-emb==0.0.76
     # via gpt-server (pyproject.toml)
 interegular==0.3.3
     # via
@@ -360,7 +360,7 @@ jiter==0.9.0
     #   openai
 jmespath==0.10.0
     # via aliyun-python-sdk-core
-joblib==1.4.2
+joblib==1.5.0
     # via
     #   librosa
     #   nltk
@@ -386,7 +386,7 @@ lark==1.2.2
     # via
     #   outlines
     #   vllm
-latex2mathml==3.77.0
+latex2mathml==3.78.0
     # via markdown2
 lazy-loader==0.4
     # via librosa
@@ -406,12 +406,14 @@ llvmlite==0.44.0
     #   pynndescent
 lm-format-enforcer==0.10.11
     # via vllm
-lmdeploy==0.7.3
+lmdeploy==0.8.0
     # via gpt-server (pyproject.toml)
 loguru==0.7.3
     # via gpt-server (pyproject.toml)
 lxml==5.4.0
-    # via sacrebleu
+    # via
+    #   blobfile
+    #   sacrebleu
 markdown-it-py==3.0.0
     # via rich
 markdown2==2.5.3
@@ -454,7 +456,7 @@ multiprocess==0.70.16
     # via
     #   datasets
     #   evaluate
-narwhals==1.37.1
+narwhals==1.38.0
     # via
     #   altair
     #   plotly
@@ -560,7 +562,7 @@ nvidia-cusparse-cu12==12.3.1.170
     #   torch
 nvidia-cusparselt-cu12==0.6.2
     # via torch
-nvidia-ml-py==12.570.86
+nvidia-ml-py==12.575.51
     # via pynvml
 nvidia-nccl-cu12==2.21.5
     # via
@@ -624,11 +626,11 @@ opentelemetry-sdk==1.26.0
     #   vllm
 opentelemetry-semantic-conventions==0.47b0
     # via opentelemetry-sdk
-opentelemetry-semantic-conventions-ai==0.4.3
+opentelemetry-semantic-conventions-ai==0.4.7
     # via vllm
 optimum==1.24.0
     # via infinity-emb
-orjson==3.10.17
+orjson==3.10.18
     # via
     #   infinity-emb
     #   sglang
@@ -678,6 +680,7 @@ parso==0.8.4
     # via jedi
 partial-json-parser==0.2.1.1.post5
     # via
+    #   lmdeploy
     #   sglang
     #   vllm
 peft==0.14.0
@@ -700,7 +703,7 @@ pillow==10.4.0
     #   streamlit
     #   torchvision
     #   vllm
-platformdirs==4.3.7
+platformdirs==4.3.8
     # via
     #   pooch
     #   yapf
@@ -766,7 +769,9 @@ pycparser==2.22
     # via cffi
 pycryptodome==3.22.0
     # via oss2
-pydantic==2.11.3
+pycryptodomex==3.22.0
+    # via blobfile
+pydantic==2.11.4
     # via
     #   anthropic
     #   compressed-tensors
@@ -782,7 +787,7 @@ pydantic==2.11.3
     #   sglang
     #   vllm
     #   xgrammar
-pydantic-core==2.33.1
+pydantic-core==2.33.2
     # via pydantic
 pydeck==0.9.1
     # via streamlit
@@ -851,7 +856,7 @@ pyzmq==26.4.0
     #   vllm
 qwen-vl-utils==0.0.11
     # via gpt-server (pyproject.toml)
-ray==2.43.0
+ray==2.46.0
     # via
     #   lmdeploy
     #   vllm
@@ -901,7 +906,7 @@ rich==13.9.4
     #   rich-toolkit
     #   streamlit
     #   typer
-rich-toolkit==0.14.3
+rich-toolkit==0.14.5
     # via fastapi-cli
 rouge-chinese==1.0.3
     # via evalscope
@@ -963,9 +968,9 @@ setuptools==75.2.0
     #   torch
     #   triton
     #   vllm
-sgl-kernel==0.1.0
+sgl-kernel==0.1.1
     # via sglang
-sglang==0.4.6.post1
+sglang==0.4.6.post2
     # via gpt-server (pyproject.toml)
 shellingham==1.5.4
     # via typer
@@ -1038,7 +1043,7 @@ tenacity==9.1.2
     # via streamlit
 tensorboardx==2.6.2.2
     # via funasr
-termcolor==3.0.1
+termcolor==3.1.0
     # via
     #   fire
     #   mmengine-lite
@@ -1195,6 +1200,7 @@ unicorn==2.1.3
     # via evalscope
 urllib3==2.4.0
     # via
+    #   blobfile
     #   modelscope
     #   requests
 uvicorn==0.32.1
@@ -1211,7 +1217,7 @@ uvloop==0.21.0
     # via
     #   sglang
     #   uvicorn
-vllm==0.8.5
+vllm==0.8.5.post1
     # via gpt-server (pyproject.toml)
 watchdog==5.0.3
     # via streamlit