import os
import autogen
from langchain_core.messages import convert_to_openai_messages
from langgraph.graph import StateGraph, MessagesState, START
from langgraph.checkpoint.memory import MemorySaver
# AutoGen configuration
config_list = [{"model": "gpt-4o", "api_key": os.environ["OPENAI_API_KEY"]}]
llm_config = {
    "timeout": 600,
    "cache_seed": 42,
    "config_list": config_list,
    "temperature": 0,
}
# Create AutoGen agents
autogen_agent = autogen.AssistantAgent(
    name="assistant",
    llm_config=llm_config,
)
user_proxy = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    max_consecutive_auto_reply=10,
    # content can be None (e.g. on tool-call messages), so guard before rstrip()
    is_termination_msg=lambda x: (x.get("content") or "").rstrip().endswith("TERMINATE"),
    code_execution_config={
        "work_dir": "/tmp/autogen_work",
        "use_docker": False,
    },
    llm_config=llm_config,
    system_message="Reply TERMINATE if the task has been solved at full satisfaction.",
)
def call_autogen_agent(state: MessagesState):
    """Node function that calls the AutoGen agent."""
    # Convert LangChain messages into OpenAI-style dicts that AutoGen understands
    messages = convert_to_openai_messages(state["messages"])
    last_message = messages[-1]
    # Pass any earlier history as carryover context (an empty slice if there is none)
    carryover = messages[:-1]
    response = user_proxy.initiate_chat(
        autogen_agent,
        message=last_message,
        carryover=carryover,
    )
    # The last entry in chat_history holds the agent's final reply
    final_content = response.chat_history[-1]["content"]
    # add_messages appends this to the graph state; wrap in a list for clarity
    return {"messages": [{"role": "assistant", "content": final_content}]}
# Create and compile the graph
def create_graph():
    # MemorySaver checkpoints conversation state per thread_id across invocations
    checkpointer = MemorySaver()
    builder = StateGraph(MessagesState)
    builder.add_node("autogen", call_autogen_agent)
    builder.add_edge(START, "autogen")
    return builder.compile(checkpointer=checkpointer)
# Export the graph for LangSmith
graph = create_graph()
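
# Minimal usage sketch (the thread_id and prompt below are illustrative, and
# OPENAI_API_KEY is assumed to be set). Because the graph is compiled with a
# checkpointer, each invocation must supply a thread_id so MemorySaver can
# persist and resume the conversation across calls.
if __name__ == "__main__":
    config = {"configurable": {"thread_id": "1"}}
    for chunk in graph.stream(
        {"messages": [{"role": "user", "content": "Find the 10th Fibonacci number."}]},
        config,
    ):
        print(chunk)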