Skip to content

Commit 930388b

Browse files
authored
Merge pull request #8 from All-Hands-AI/xw/mcp-fork
Remove unnecessary commits and make MCPStdioConfig cleaner
2 parents f78727b + abc2063 commit 930388b

26 files changed

+292
-304
lines changed

.gitignore

-1
Original file line numberDiff line numberDiff line change
@@ -233,4 +233,3 @@ containers/runtime/Dockerfile
233233
containers/runtime/project.tar.gz
234234
containers/runtime/code
235235
**/node_modules/
236-
yes/

config.template.toml

+1-1
Original file line numberDiff line numberDiff line change
@@ -383,4 +383,4 @@ type = "noop"
383383
#################################### Eval ####################################
384384
# Configuration for the evaluation, please refer to the specific evaluation
385385
# plugin for the available options
386-
##############################################################################
386+
##############################################################################

docker-compose.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
12
services:
23
openhands:
34
build:
@@ -9,7 +10,6 @@ services:
910
- SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.31-nikolaik}
1011
#- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of openhands-state for this user
1112
- WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
12-
- LOG_ALL_EVENTS=true
1313
ports:
1414
- "3000:3000"
1515
extra_hosts:

frontend/__tests__/routes/settings.test.tsx

-11
Original file line numberDiff line numberDiff line change
@@ -61,17 +61,6 @@ describe("Settings Screen", () => {
6161
};
6262

6363
it("should render", async () => {
64-
// Mock the config to ensure LLM Settings are visible
65-
getConfigSpy.mockResolvedValue({
66-
APP_MODE: "oss",
67-
GITHUB_CLIENT_ID: "test-github-client-id",
68-
POSTHOG_CLIENT_KEY: "test-posthog-client-key",
69-
FEATURE_FLAGS: {
70-
ENABLE_BILLING: false,
71-
HIDE_LLM_SETTINGS: false,
72-
},
73-
});
74-
7564
renderSettingsScreen();
7665

7766
await waitFor(() => {

frontend/src/i18n/declaration.ts

-1
Original file line numberDiff line numberDiff line change
@@ -301,7 +301,6 @@ export enum I18nKey {
301301
STATUS$ERROR_LLM_SERVICE_UNAVAILABLE = "STATUS$ERROR_LLM_SERVICE_UNAVAILABLE",
302302
STATUS$ERROR_LLM_INTERNAL_SERVER_ERROR = "STATUS$ERROR_LLM_INTERNAL_SERVER_ERROR",
303303
STATUS$ERROR_LLM_OUT_OF_CREDITS = "STATUS$ERROR_LLM_OUT_OF_CREDITS",
304-
STATUS$ERROR_LLM_CONTENT_POLICY_VIOLATION = "STATUS$ERROR_LLM_CONTENT_POLICY_VIOLATION",
305304
STATUS$ERROR_RUNTIME_DISCONNECTED = "STATUS$ERROR_RUNTIME_DISCONNECTED",
306305
STATUS$LLM_RETRY = "STATUS$LLM_RETRY",
307306
AGENT_ERROR$BAD_ACTION = "AGENT_ERROR$BAD_ACTION",

openhands/agenthub/codeact_agent/function_calling.py

+4-4
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@
4242
from openhands.events.event import FileEditSource, FileReadSource
4343
from openhands.events.tool import ToolCallMetadata
4444
from openhands.llm import LLM
45-
from openhands.mcp.mcp import BaseTool
45+
from openhands.mcp import MCPClientTool
4646

4747

4848
def combine_thought(action: Action, thought: str) -> Action:
@@ -73,7 +73,7 @@ def response_to_actions(response: ModelResponse) -> list[Action]:
7373
# Process each tool call to OpenHands action
7474
for i, tool_call in enumerate(assistant_msg.tool_calls):
7575
action: Action
76-
logger.warning(f'Tool call in function_calling.py: {tool_call}')
76+
logger.debug(f'Tool call in function_calling.py: {tool_call}')
7777
try:
7878
arguments = json.loads(tool_call.function.arguments)
7979
except json.decoder.JSONDecodeError as e:
@@ -199,9 +199,9 @@ def response_to_actions(response: ModelResponse) -> list[Action]:
199199
# ================================================
200200
# McpAction (MCP)
201201
# ================================================
202-
elif tool_call.function.name.endswith(BaseTool.postfix()):
202+
elif tool_call.function.name.endswith(MCPClientTool.postfix()):
203203
action = McpAction(
204-
name=tool_call.function.name.rstrip(BaseTool.postfix()),
204+
name=tool_call.function.name.rstrip(MCPClientTool.postfix()),
205205
arguments=tool_call.function.arguments,
206206
)
207207
else:

openhands/core/cli.py

+2-1
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,7 @@
55

66
from termcolor import colored
77

8+
import openhands.agenthub # noqa F401 (we import this to get the agents registered)
89
from openhands.core.config import (
910
AppConfig,
1011
parse_arguments,
@@ -36,7 +37,7 @@
3637
FileEditObservation,
3738
)
3839
from openhands.io import read_input, read_task
39-
from openhands.mcp.mcp import fetch_mcp_tools_from_config
40+
from openhands.mcp import fetch_mcp_tools_from_config
4041

4142

4243
def display_message(message: str):

openhands/core/config/mcp_config.py

+29-36
Original file line numberDiff line numberDiff line change
@@ -31,50 +31,43 @@ def validate_servers(self) -> None:
3131
raise ValueError(f'Invalid URL {url}: {str(e)}')
3232

3333

34+
class MCPStdioConfigEntry(BaseModel):
35+
"""Configuration for a single MCP stdio entry.
36+
37+
Attributes:
38+
command: The command to run.
39+
args: List of arguments for the command.
40+
env: Dictionary of environment variables.
41+
"""
42+
43+
command: str
44+
args: list[str] = Field(default_factory=list)
45+
env: dict[str, str] = Field(default_factory=dict)
46+
47+
model_config = {'extra': 'forbid'}
48+
49+
3450
class MCPStdioConfig(BaseModel):
3551
"""Configuration for MCP stdio settings.
3652
3753
Attributes:
38-
commands: List of commands to run.
39-
args: List of arguments for each command.
40-
envs: List of environment variable tuples for each command.
54+
tools: Dictionary of tool configurations, where keys are tool names.
4155
"""
4256

43-
commands: List[str] = Field(default_factory=list)
44-
args: List[List[str]] = Field(default_factory=list)
45-
envs: List[List[tuple[str, str]]] = Field(default_factory=list)
57+
tools: dict[str, MCPStdioConfigEntry] = Field(default_factory=dict)
4658

4759
model_config = {'extra': 'forbid'}
4860

4961
def validate_stdio(self) -> None:
50-
"""Validate that commands, args, and envs are properly configured."""
51-
52-
# Check if number of commands matches number of args lists
53-
if len(self.commands) != len(self.args):
54-
raise ValueError(
55-
f'Number of commands ({len(self.commands)}) does not match '
56-
f'number of args lists ({len(self.args)})'
57-
)
58-
59-
# Check if number of commands matches number of envs lists
60-
if len(self.commands) != len(self.envs):
61-
raise ValueError(
62-
f'Number of commands ({len(self.commands)}) does not match '
63-
f'number of envs lists ({len(self.envs)})'
64-
)
65-
66-
# Validate each environment variable tuple
67-
for i, env_list in enumerate(self.envs):
68-
for j, env_tuple in enumerate(env_list):
69-
if not isinstance(env_tuple, tuple) or len(env_tuple) != 2:
70-
raise ValueError(
71-
f'Environment variable at index {j} for command {i} must be a tuple of (key, value)'
72-
)
73-
key, value = env_tuple
74-
if not isinstance(key, str) or not isinstance(value, str):
75-
raise ValueError(
76-
f'Environment variable key and value at index {j} for command {i} must be strings'
77-
)
62+
"""Validate that tools are properly configured."""
63+
# Tool names validation
64+
for tool_name in self.tools:
65+
if not tool_name.strip():
66+
raise ValueError('Tool names cannot be empty')
67+
if not tool_name.replace('-', '').isalnum():
68+
raise ValueError(
69+
f'Invalid tool name: {tool_name}. Tool names must be alphanumeric (hyphens allowed)'
70+
)
7871

7972

8073
class MCPConfig(BaseModel):
@@ -105,11 +98,11 @@ def from_toml_section(cls, data: dict) -> dict[str, 'MCPConfig']:
10598

10699
try:
107100
# Create SSE config if present
108-
sse_config = MCPSSEConfig(**data.get('mcp-sse', {}))
101+
sse_config = MCPSSEConfig.model_validate(data.get('mcp-sse', {}))
109102
sse_config.validate_servers()
110103

111104
# Create stdio config if present
112-
stdio_config = MCPStdioConfig(**data.get('mcp-stdio', {}))
105+
stdio_config = MCPStdioConfig.model_validate(data.get('mcp-stdio', {}))
113106
stdio_config.validate_stdio()
114107

115108
# Create the main MCP config

openhands/core/main.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -30,7 +30,7 @@
3030
from openhands.events.event import Event
3131
from openhands.events.observation import AgentStateChangedObservation
3232
from openhands.io import read_input, read_task
33-
from openhands.mcp.mcp import fetch_mcp_tools_from_config
33+
from openhands.mcp import fetch_mcp_tools_from_config
3434
from openhands.memory.memory import Memory
3535
from openhands.runtime.base import Runtime
3636
from openhands.utils.async_utils import call_async_from_sync

openhands/llm/async_llm.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import asyncio
22
from functools import partial
3-
from typing import Any
3+
from typing import Any, Callable
44

55
from litellm import acompletion as litellm_acompletion
66

@@ -17,7 +17,7 @@
1717
class AsyncLLM(LLM):
1818
"""Asynchronous LLM class."""
1919

20-
def __init__(self, *args, **kwargs):
20+
def __init__(self, *args: Any, **kwargs: Any) -> None:
2121
super().__init__(*args, **kwargs)
2222

2323
self._async_completion = partial(
@@ -46,7 +46,7 @@ def __init__(self, *args, **kwargs):
4646
retry_max_wait=self.config.retry_max_wait,
4747
retry_multiplier=self.config.retry_multiplier,
4848
)
49-
async def async_completion_wrapper(*args, **kwargs):
49+
async def async_completion_wrapper(*args: Any, **kwargs: Any) -> Any:
5050
"""Wrapper for the litellm acompletion function that adds logging and cost tracking."""
5151
messages: list[dict[str, Any]] | dict[str, Any] = []
5252

@@ -77,7 +77,7 @@ async def async_completion_wrapper(*args, **kwargs):
7777

7878
self.log_prompt(messages)
7979

80-
async def check_stopped():
80+
async def check_stopped() -> None:
8181
while should_continue():
8282
if (
8383
hasattr(self.config, 'on_cancel_requested_fn')
@@ -117,14 +117,14 @@ async def check_stopped():
117117
except asyncio.CancelledError:
118118
pass
119119

120-
self._async_completion = async_completion_wrapper # type: ignore
120+
self._async_completion = async_completion_wrapper
121121

122-
async def _call_acompletion(self, *args, **kwargs):
122+
async def _call_acompletion(self, *args: Any, **kwargs: Any) -> Any:
123123
"""Wrapper for the litellm acompletion function."""
124124
# Used in testing?
125125
return await litellm_acompletion(*args, **kwargs)
126126

127127
@property
128-
def async_completion(self):
128+
def async_completion(self) -> Callable:
129129
"""Decorator for the async litellm acompletion function."""
130130
return self._async_completion

openhands/llm/bedrock.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -28,5 +28,5 @@ def list_foundation_models(
2828
return []
2929

3030

31-
def remove_error_modelId(model_list):
31+
def remove_error_modelId(model_list: list[str]) -> list[str]:
3232
return list(filter(lambda m: not m.startswith('bedrock'), model_list))

openhands/llm/debug_mixin.py

+7-7
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88

99
class DebugMixin:
10-
def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]):
10+
def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]) -> None:
1111
if not messages:
1212
logger.debug('No completion messages!')
1313
return
@@ -24,30 +24,30 @@ def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]):
2424
else:
2525
logger.debug('No completion messages!')
2626

27-
def log_response(self, message_back: str):
27+
def log_response(self, message_back: str) -> None:
2828
if message_back:
2929
llm_response_logger.debug(message_back)
3030

31-
def _format_message_content(self, message: dict[str, Any]):
31+
def _format_message_content(self, message: dict[str, Any]) -> str:
3232
content = message['content']
3333
if isinstance(content, list):
3434
return '\n'.join(
3535
self._format_content_element(element) for element in content
3636
)
3737
return str(content)
3838

39-
def _format_content_element(self, element: dict[str, Any]):
39+
def _format_content_element(self, element: dict[str, Any] | Any) -> str:
4040
if isinstance(element, dict):
4141
if 'text' in element:
42-
return element['text']
42+
return str(element['text'])
4343
if (
4444
self.vision_is_active()
4545
and 'image_url' in element
4646
and 'url' in element['image_url']
4747
):
48-
return element['image_url']['url']
48+
return str(element['image_url']['url'])
4949
return str(element)
5050

5151
# This method should be implemented in the class that uses DebugMixin
52-
def vision_is_active(self):
52+
def vision_is_active(self) -> bool:
5353
raise NotImplementedError

openhands/llm/llm.py

+14-12
Original file line numberDiff line numberDiff line change
@@ -186,7 +186,7 @@ def __init__(
186186
retry_multiplier=self.config.retry_multiplier,
187187
retry_listener=self.retry_listener,
188188
)
189-
def wrapper(*args, **kwargs):
189+
def wrapper(*args: Any, **kwargs: Any) -> Any:
190190
"""Wrapper for the litellm completion function. Logs the input and output of the completion function."""
191191
from openhands.io import json
192192

@@ -355,14 +355,14 @@ def wrapper(*args, **kwargs):
355355
self._completion = wrapper
356356

357357
@property
358-
def completion(self):
358+
def completion(self) -> Callable:
359359
"""Decorator for the litellm completion function.
360360
361361
Check the complete documentation at https://litellm.vercel.app/docs/completion
362362
"""
363363
return self._completion
364364

365-
def init_model_info(self):
365+
def init_model_info(self) -> None:
366366
if self._tried_model_info:
367367
return
368368
self._tried_model_info = True
@@ -622,10 +622,12 @@ def get_token_count(self, messages: list[dict] | list[Message]) -> int:
622622
# try to get the token count with the default litellm tokenizers
623623
# or the custom tokenizer if set for this LLM configuration
624624
try:
625-
return litellm.token_counter(
626-
model=self.config.model,
627-
messages=messages,
628-
custom_tokenizer=self.tokenizer,
625+
return int(
626+
litellm.token_counter(
627+
model=self.config.model,
628+
messages=messages,
629+
custom_tokenizer=self.tokenizer,
630+
)
629631
)
630632
except Exception as e:
631633
# limit logspam in case token count is not supported
@@ -654,7 +656,7 @@ def _is_local(self) -> bool:
654656
return True
655657
return False
656658

657-
def _completion_cost(self, response) -> float:
659+
def _completion_cost(self, response: Any) -> float:
658660
"""Calculate completion cost and update metrics with running total.
659661
660662
Calculate the cost of a completion response based on the model. Local models are treated as free.
@@ -707,21 +709,21 @@ def _completion_cost(self, response) -> float:
707709
logger.debug(
708710
f'Using fallback model name {_model_name} to get cost: {cost}'
709711
)
710-
self.metrics.add_cost(cost)
711-
return cost
712+
self.metrics.add_cost(float(cost))
713+
return float(cost)
712714
except Exception:
713715
self.cost_metric_supported = False
714716
logger.debug('Cost calculation not supported for this model.')
715717
return 0.0
716718

717-
def __str__(self):
719+
def __str__(self) -> str:
718720
if self.config.api_version:
719721
return f'LLM(model={self.config.model}, api_version={self.config.api_version}, base_url={self.config.base_url})'
720722
elif self.config.base_url:
721723
return f'LLM(model={self.config.model}, base_url={self.config.base_url})'
722724
return f'LLM(model={self.config.model})'
723725

724-
def __repr__(self):
726+
def __repr__(self) -> str:
725727
return str(self)
726728

727729
def reset(self) -> None:

0 commit comments

Comments (0)