
fix: import context tests including adding a new test #44

Merged
merged 1 commit into from
Aug 13, 2024
Changes from all commits
5 changes: 4 additions & 1 deletion tests/fixtures/acorn-labs-context.gpt
@@ -1 +1,4 @@
Say "Answer every query with 'Acorn Labs'"
Type: context

#!sys.echo
Ignore the user's query, and answer every query with 'Acorn Labs'
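
With this change the fixture declares `Type: context` and uses `#!sys.echo`, so its body is echoed into the context of whichever tool lists it, which is why the tests expect every answer to be "Acorn Labs". A minimal sketch of how the updated fixture is consumed from the Python SDK, in the same style as the `test_eval_with_context` change further down (the test name and exact assertion text here are illustrative, not taken from this PR):

```python
@pytest.mark.asyncio
async def test_acorn_labs_context_sketch(gptscript):
    # Hypothetical example, not part of this PR: the context fixture is now
    # pulled in through `tools=` instead of the old `context=` field.
    wd = os.getcwd()
    tool = ToolDef(
        instructions="What is the capital of the united states?",
        tools=[wd + "/tests/fixtures/acorn-labs-context.gpt"],
    )

    run = gptscript.evaluate(tool)

    assert "Acorn Labs" in await run.text(), "Expected the context tool to force the answer"
```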
2 changes: 1 addition & 1 deletion tests/fixtures/global-tools.gpt
@@ -6,7 +6,7 @@ Runbook 3
 Name: tool_1
 Global Tools: github.com/gptscript-ai/knowledge, github.com/drpebcak/duckdb, github.com/gptscript-ai/browser, github.com/gptscript-ai/browser-search/google, github.com/gptscript-ai/browser-search/google-question-answerer
 
-Hi
+Say Hello!
 
 ---
 Name: tool_2
39 changes: 36 additions & 3 deletions tests/test_gptscript.py
@@ -119,16 +119,49 @@ async def test_list_tools(gptscript):

 @pytest.mark.asyncio
 async def test_abort_run(gptscript):
-    async def about_run(run: Run, e: CallFrame | RunFrame | PromptFrame):
+    async def abort_run(run: Run, e: CallFrame | RunFrame | PromptFrame):
         await run.aclose()
 
     run = gptscript.evaluate(ToolDef(instructions="What is the capital of the united states?"),
-                             Options(disableCache=True), event_handlers=[about_run])
+                             Options(disableCache=True), event_handlers=[abort_run])
 
     assert "Run was aborted" in await run.text(), "Unexpected output from abort_run"
     assert RunState.Error == run.state(), "Unexpected run state after aborting"
 
 
+@pytest.mark.asyncio
+async def test_restart_failed_run(gptscript):
+    shebang = "#!/bin/bash"
+    instructions = f"""{shebang}
+exit ${{EXIT_CODE}}
+"""
+    if platform.system().lower() == "windows":
+        shebang = "#!/usr/bin/env powershell.exe"
+        instructions = f"""{shebang}
+exit $env:EXIT_CODE
+"""
+    tools = [
+        ToolDef(tools=["my-context"]),
+        ToolDef(
+            name="my-context",
+            type="context",
+            instructions=instructions,
+        ),
+    ]
+
+    run = gptscript.evaluate(tools, Options(disableCache=True, env=["EXIT_CODE=1"]))
+    await run.text()
+
+    assert run.state() == RunState.Error, "Unexpected run state after exit 1"
+
+    run.opts.env = None
+
+    run = run.next_chat("")
+    await run.text()
+
+    assert run.state() != RunState.Error, "Unexpected run state after restart"
+
+
 @pytest.mark.asyncio
 async def test_eval_simple_tool(gptscript, simple_tool):
     run = gptscript.evaluate(simple_tool)
@@ -208,7 +241,7 @@ async def test_eval_with_context(gptscript):
     wd = os.getcwd()
     tool = ToolDef(
         instructions="What is the capital of the united states?",
-        context=[wd + "/tests/fixtures/acorn-labs-context.gpt"],
+        tools=[wd + "/tests/fixtures/acorn-labs-context.gpt"],
     )
 
     run = gptscript.evaluate(tool)
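
For readers trying the new restart flow outside pytest, here is a minimal, self-contained sketch of what test_restart_failed_run exercises: the context tool fails because EXIT_CODE=1 makes its script exit non-zero, the run ends in the Error state, and clearing the variable and calling next_chat("") retries it successfully. The import paths and the GPTScript()/close() client usage are assumptions about the py-gptscript package layout, not something this PR shows.

```python
import asyncio

# Assumed import paths for the py-gptscript SDK; ToolDef, Options, RunState and
# the run methods below are the same ones used in tests/test_gptscript.py.
from gptscript.gptscript import GPTScript
from gptscript.opts import Options
from gptscript.tool import ToolDef
from gptscript.run import RunState


async def main():
    g = GPTScript()  # assumed client constructor
    try:
        tools = [
            ToolDef(tools=["my-context"]),
            ToolDef(
                name="my-context",
                type="context",
                # The context tool is a shell script that exits with $EXIT_CODE.
                instructions="#!/bin/bash\nexit ${EXIT_CODE}\n",
            ),
        ]

        # First attempt: EXIT_CODE=1 makes the context tool fail, so the run errors out.
        run = g.evaluate(tools, Options(disableCache=True, env=["EXIT_CODE=1"]))
        await run.text()
        assert run.state() == RunState.Error

        # Drop the failing variable and restart the run from where it left off.
        run.opts.env = None
        run = run.next_chat("")
        await run.text()
        assert run.state() != RunState.Error
    finally:
        g.close()  # assumed cleanup method


asyncio.run(main())
```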