Skip to content

Instantly share code, notes, and snippets.

@elijahbenizzy
Created January 16, 2025 04:44
Show Gist options
  • Save elijahbenizzy/f5be2c99d8b6ed310f53bde33b6c87e7 to your computer and use it in GitHub Desktop.
from burr.core import action, State, ApplicationBuilder
@action(reads=[], writes=["prompt", "chat_history"])
def human_input(state: State, prompt: str) -> State:
    """Record the user's prompt in state and append it to the chat history.

    Args:
        state: Current Burr application state.
        prompt: The user's input text.

    Returns:
        New state with ``prompt`` set and the user's message appended to
        ``chat_history``.
    """
    # The original referenced an undefined name `chat_item`; build the
    # chat-history entry for the user's message explicitly.
    chat_item = {"role": "user", "content": prompt}
    return state.update(prompt=prompt).append(chat_history=chat_item)
@action(reads=["chat_history"], writes=["response", "chat_history"])
def ai_response(state: State) -> State:
    """Query the LLM with the chat history and record its reply.

    Args:
        state: Current Burr application state; reads ``chat_history``.

    Returns:
        New state with ``response`` set and the assistant's message appended
        to ``chat_history``.
    """
    response = _query_llm(state["chat_history"])  # Burr doesn't care how you use LLMs!
    # The original referenced undefined names `content` and `chat_item`;
    # use the LLM response and build the assistant's history entry.
    chat_item = {"role": "assistant", "content": response}
    return state.update(response=response).append(chat_history=chat_item)
# Wire the two actions into a simple two-node loop: the user speaks, the AI
# answers, and control returns to the user.
builder = ApplicationBuilder()
builder = builder.with_actions(human_input, ai_response)
builder = builder.with_transitions(
    ("human_input", "ai_response"),
    ("ai_response", "human_input"),
)
builder = builder.with_state(chat_history=[])
builder = builder.with_entrypoint("human_input")
app = builder.build()

# Run until the AI has answered once, then print the response from app state.
*_, state = app.run(halt_after=["ai_response"], inputs={"prompt": "Who was Aaron Burr, sir?"})
print("answer:", app.state["response"])
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment