generated from VectorInstitute/aieng-template-poetry
-
Notifications
You must be signed in to change notification settings - Fork 10
/
Copy pathbasic_usage.py
executable file
·43 lines (34 loc) · 1.27 KB
/
basic_usage.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
#!/usr/bin/env python
"""Basic example of Vector Inference API usage.

This script demonstrates the core features of the Vector Inference API
for launching and interacting with models: listing available models,
launching one on Slurm, waiting for readiness, reading metrics, and
shutting the model down.
"""
from vec_inf.client import VecInfClient


def main() -> None:
    """Run the end-to-end example: list, launch, query, and shut down a model."""
    # Create the API client
    client = VecInfClient()

    # List available models
    print("Listing available models...")
    models = client.list_models()
    print(f"Found {len(models)} models")
    for model in models[:3]:  # Show just the first few
        print(f"- {model.name} ({model.type})")

    # Launch a model (replace with an actual model name from your environment)
    model_name = "Meta-Llama-3.1-8B-Instruct"  # Use an available model from your list
    print(f"\nLaunching {model_name}...")
    response = client.launch_model(model_name)
    job_id = response.slurm_job_id
    print(f"Launched with job ID: {job_id}")

    # Once launched, always shut the job down — even if waiting or metrics
    # retrieval raises — so a failed example run doesn't leave a Slurm job running.
    try:
        # Wait for the model to be ready
        print("Waiting for model to be ready...")
        status = client.wait_until_ready(job_id)
        print(f"Model is ready at: {status.base_url}")

        # Get metrics
        print("\nRetrieving metrics...")
        metrics = client.get_metrics(job_id)
        # metrics.metrics may not be a dict (e.g. an error string) — only
        # iterate key/value pairs when it is one.
        if isinstance(metrics.metrics, dict):
            for key, value in metrics.metrics.items():
                print(f"- {key}: {value}")
    finally:
        # Shutdown when done
        print("\nShutting down model...")
        client.shutdown_model(job_id)
        print("Model shutdown complete")


if __name__ == "__main__":
    # Guard the entry point so importing this module (e.g. from docs tooling
    # or tests) does not launch a Slurm job as a side effect.
    main()