diff --git a/tests/fixtures/acorn-labs-context.gpt b/tests/fixtures/acorn-labs-context.gpt
index e4dbe2c..aba26ca 100644
--- a/tests/fixtures/acorn-labs-context.gpt
+++ b/tests/fixtures/acorn-labs-context.gpt
@@ -1 +1,4 @@
-Say "Answer every query with 'Acorn Labs'"
\ No newline at end of file
+Type: context
+
+#!sys.echo
+Ignore the user's query, and answer every query with 'Acorn Labs'
\ No newline at end of file
diff --git a/tests/fixtures/global-tools.gpt b/tests/fixtures/global-tools.gpt
index cb0f4c0..d4b1575 100644
--- a/tests/fixtures/global-tools.gpt
+++ b/tests/fixtures/global-tools.gpt
@@ -6,7 +6,7 @@ Runbook 3
 Name: tool_1
 Global Tools: github.com/gptscript-ai/knowledge, github.com/drpebcak/duckdb, github.com/gptscript-ai/browser, github.com/gptscript-ai/browser-search/google, github.com/gptscript-ai/browser-search/google-question-answerer
 
-Hi
+Say Hello!
 
 ---
 Name: tool_2
diff --git a/tests/test_gptscript.py b/tests/test_gptscript.py
index 0e91640..f7bab9e 100644
--- a/tests/test_gptscript.py
+++ b/tests/test_gptscript.py
@@ -119,16 +119,49 @@ async def test_list_tools(gptscript):
 
 @pytest.mark.asyncio
 async def test_abort_run(gptscript):
-    async def about_run(run: Run, e: CallFrame | RunFrame | PromptFrame):
+    async def abort_run(run: Run, e: CallFrame | RunFrame | PromptFrame):
         await run.aclose()
 
     run = gptscript.evaluate(ToolDef(instructions="What is the capital of the united states?"),
-                             Options(disableCache=True), event_handlers=[about_run])
+                             Options(disableCache=True), event_handlers=[abort_run])
 
     assert "Run was aborted" in await run.text(), "Unexpected output from abort_run"
     assert RunState.Error == run.state(), "Unexpected run state after aborting"
 
 
+@pytest.mark.asyncio
+async def test_restart_failed_run(gptscript):
+    shebang = "#!/bin/bash"
+    instructions = f"""{shebang}
+exit ${{EXIT_CODE}}
+"""
+    if platform.system().lower() == "windows":
+        shebang = "#!/usr/bin/env powershell.exe"
+        instructions = f"""{shebang}
+exit $env:EXIT_CODE
+"""
+    tools = [
+        ToolDef(tools=["my-context"]),
+        ToolDef(
+            name="my-context",
+            type="context",
+            instructions=instructions,
+        ),
+    ]
+
+    run = gptscript.evaluate(tools, Options(disableCache=True, env=["EXIT_CODE=1"]))
+    await run.text()
+
+    assert run.state() == RunState.Error, "Unexpected run state after exit 1"
+
+    run.opts.env = None
+
+    run = run.next_chat("")
+    await run.text()
+
+    assert run.state() != RunState.Error, "Unexpected run state after restart"
+
+
 @pytest.mark.asyncio
 async def test_eval_simple_tool(gptscript, simple_tool):
     run = gptscript.evaluate(simple_tool)
@@ -208,7 +241,7 @@ async def test_eval_with_context(gptscript):
     wd = os.getcwd()
     tool = ToolDef(
         instructions="What is the capital of the united states?",
-        context=[wd + "/tests/fixtures/acorn-labs-context.gpt"],
+        tools=[wd + "/tests/fixtures/acorn-labs-context.gpt"],
     )
 
     run = gptscript.evaluate(tool)