chat.py fixes #84

Merged: 10 commits merged on Jun 24, 2025
8 changes: 7 additions & 1 deletion CHANGELOG.md
@@ -14,9 +14,15 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
- Added `TLMChatCompletion` module, providing support for trust scoring with OpenAI ChatCompletion objects
- Added a VPC compatible version of `TLMChatCompletion`

+### Changed
+
+- Revised tools prompt in `chat.py`
+
### Fixed

-- Bug fix for formatting system prompt after user messages
+- Bug fix in `chat.py` for formatting system prompt after user messages
+- Bug fix in `chat.py` for empty tool list still using tools prompt
+- Bug fix in `chat.py` for handling empty strings args

## [1.1.9] - 2025-06-17

24 changes: 15 additions & 9 deletions src/cleanlab_tlm/utils/chat.py
@@ -15,6 +15,7 @@
_SYSTEM_PREFIX = "System: "
_USER_PREFIX = "User: "
_ASSISTANT_PREFIX = "Assistant: "
+_TOOL_PREFIX = "Tool: "

# Define role constants
_SYSTEM_ROLE: Literal["system"] = "system"
@@ -41,11 +42,12 @@

# Define tool-related message prefixes
_TOOL_DEFINITIONS_PREFIX = (
"You are a function calling AI model. You are provided with function signatures within "
"You are an AI Assistant that can call provided tools (a.k.a. functions). "
"The set of available tools is provided to you as function signatures within "
f"{_TOOLS_TAG_START} {_TOOLS_TAG_END} XML tags. "
"You may call one or more functions to assist with the user query. If available tools are not relevant in assisting "
"with user query, just respond in natural conversational language. Don't make assumptions about what values to plug "
"into functions. After calling & executing the functions, you will be provided with function results within "
"You may call one or more of these functions to assist with the user query. If the provided functions are not helpful/relevant, "
"then just respond in natural conversational language. Don't make assumptions about what values to plug "
"into functions. After you choose to call a function, you will be provided with the function's results within "
f"{_TOOL_RESPONSE_TAG_START} {_TOOL_RESPONSE_TAG_END} XML tags.\n\n"
f"{_TOOLS_TAG_START}\n"
)
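For context, a rough sketch of how the revised preamble could be combined with tool signatures. The tag values and prompt text below are abbreviated stand-ins for the constants in chat.py, and `render_tools_preamble` is a hypothetical helper, not part of the module:

```python
import json

# Abbreviated stand-ins for the constants defined in chat.py (illustration only)
_TOOLS_TAG_START, _TOOLS_TAG_END = "<tools>", "</tools>"
_TOOL_DEFINITIONS_PREFIX = (
    "You are an AI Assistant that can call provided tools (a.k.a. functions). "
    "The set of available tools is provided to you as function signatures within "
    f"{_TOOLS_TAG_START} {_TOOLS_TAG_END} XML tags.\n\n"
    f"{_TOOLS_TAG_START}\n"
)

def render_tools_preamble(tools: list) -> str:
    # Hypothetical helper: serialize each tool signature as JSON inside the <tools> block
    body = "\n".join(json.dumps(tool, indent=2) for tool in tools)
    return f"{_TOOL_DEFINITIONS_PREFIX}{body}\n{_TOOLS_TAG_END}\n\n"

print(render_tools_preamble([{"name": "get_weather", "parameters": {"city": "string"}}]))
```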
@@ -231,7 +233,7 @@ def _form_prompt_responses_api(
last_system_idx = _find_index_after_first_system_block(messages)

# Insert tool definitions and instructions after system messages if needed
-if tools is not None:
+if tools is not None and len(tools) > 0:
messages.insert(
last_system_idx + 1,
{
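This guard (and the matching one in `_form_prompt_chat_completions_api` further down) means an empty tools list no longer pulls in the tools preamble. A minimal sketch of the intended behavior, with `should_insert_tools_prompt` as an illustrative stand-in for the inline check:

```python
def should_insert_tools_prompt(tools) -> bool:
    # Stand-in for the inline check: only add the tools preamble when at least one tool is provided
    return tools is not None and len(tools) > 0

assert should_insert_tools_prompt(None) is False
assert should_insert_tools_prompt([]) is False  # before the fix, [] is not None, so the preamble was still inserted
assert should_insert_tools_prompt([{"name": "lookup_order"}]) is True
```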
@@ -272,11 +274,12 @@ def _form_prompt_responses_api(
# Format function call as JSON within XML tags, now including call_id
function_call = {
"name": msg["name"],
"arguments": json.loads(msg["arguments"]),
"arguments": json.loads(msg["arguments"]) if msg["arguments"] else {},
"call_id": call_id,
}
output += f"{_TOOL_CALL_TAG_START}\n{json.dumps(function_call, indent=2)}\n{_TOOL_CALL_TAG_END}\n\n"
elif msg["type"] == _FUNCTION_CALL_OUTPUT_TYPE:
+output += _TOOL_PREFIX
call_id = msg.get("call_id", "")
name = function_names.get(call_id, "function")
# Format function response as JSON within XML tags
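The `if msg["arguments"] else {}` guard in this hunk handles tool calls whose arguments arrive as an empty string: `json.loads("")` raises `json.JSONDecodeError`, so the code now falls back to an empty dict. A small sketch of the same guard in isolation (`parse_arguments` is illustrative, not part of chat.py):

```python
import json

def parse_arguments(raw: str) -> dict:
    # An empty arguments string would make json.loads raise JSONDecodeError,
    # so treat it as "no arguments" instead
    return json.loads(raw) if raw else {}

assert parse_arguments('{"city": "Paris"}') == {"city": "Paris"}
assert parse_arguments("") == {}
```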
@@ -319,7 +322,7 @@ def _form_prompt_chat_completions_api(
# Find the index after the first consecutive block of system messages
last_system_idx = _find_index_after_first_system_block(cast(list[dict[str, Any]], messages))

-if tools is not None:
+if tools is not None and len(tools) > 0:
messages.insert(
last_system_idx + 1,
{
@@ -329,7 +332,7 @@
)

# Only return content directly if there's a single user message AND no tools
-if len(messages) == 1 and messages[0].get("role") == _USER_ROLE and tools is None:
+if len(messages) == 1 and messages[0].get("role") == _USER_ROLE and (tools is None or len(tools) == 0):
return output + str(messages[0]["content"])

# Warn if the last message is an assistant message with tool calls
@@ -359,12 +362,15 @@ def _form_prompt_chat_completions_api(
# Format function call as JSON within XML tags, now including call_id
function_call = {
"name": tool_call["function"]["name"],
"arguments": json.loads(tool_call["function"]["arguments"]),
"arguments": json.loads(tool_call["function"]["arguments"])
if tool_call["function"]["arguments"]
else {},
"call_id": call_id,
}
output += f"{_TOOL_CALL_TAG_START}\n{json.dumps(function_call, indent=2)}\n{_TOOL_CALL_TAG_END}\n\n"
elif msg["role"] == _TOOL_ROLE:
# Handle tool responses
+output += _TOOL_PREFIX
call_id = msg["tool_call_id"]
name = function_names.get(call_id, "function")
# Format function response as JSON within XML tags
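With this change, tool results (function_call_output items in the Responses API and tool-role messages in the Chat Completions API) are prefixed with `_TOOL_PREFIX` when the conversation is flattened into a prompt. A rough sketch of what one rendered tool result might look like; the payload fields, tag values, and `render_tool_response` helper are assumptions for illustration rather than the module's actual output format:

```python
import json

_TOOL_PREFIX = "Tool: "
# Illustrative tag values; chat.py defines the real _TOOL_RESPONSE_* constants
_TOOL_RESPONSE_TAG_START, _TOOL_RESPONSE_TAG_END = "<tool_response>", "</tool_response>"

def render_tool_response(name: str, call_id: str, output: str) -> str:
    # Hypothetical rendering of a single tool result, now led by the "Tool: " prefix
    payload = {"name": name, "call_id": call_id, "output": output}
    return (
        f"{_TOOL_PREFIX}{_TOOL_RESPONSE_TAG_START}\n"
        f"{json.dumps(payload, indent=2)}\n"
        f"{_TOOL_RESPONSE_TAG_END}\n\n"
    )

print(render_tool_response("get_weather", "call_123", '{"temp_c": 21}'))
```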