```python
import argparse

from langchain_core.messages import HumanMessage

from agent.graph import graph


def main() -> None:
    """Run the research agent from the command line."""
    parser = argparse.ArgumentParser(description="Run the LangGraph research agent")
    parser.add_argument("question", help="Research question")
    parser.add_argument(
        "--initial-queries",
        type=int,
        default=3,
        help="Number of initial search queries",
    )
    parser.add_argument(
        "--max-loops",
        type=int,
        default=2,
        help="Maximum number of research loops",
    )
    parser.add_argument(
        "--reasoning-model",
        default="gemini-2.5-pro-preview-05-06",
        help="Model for the final answer",
    )
    args = parser.parse_args()

    state = {
        "messages": [HumanMessage(content=args.question)],
        "initial_search_query_count": args.initial_queries,
        "max_research_loops": args.max_loops,
        "reasoning_model": args.reasoning_model,
    }

    result = graph.invoke(state)
    messages = result.get("messages", [])
    if messages:
        print(messages[-1].content)


if __name__ == "__main__":
    main()
```
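As a quick smoke test, assuming the script is saved as `cli_research.py` somewhere the `agent` package is importable and the Gemini API credentials the graph expects are configured, an invocation such as `python cli_research.py "What is retrieval-augmented generation?" --max-loops 1` runs the agent and prints the content of the final answer message to stdout.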