/**
* A simple agentic chat flow using LangGraph instead of CrewAI.
*/
import { ChatOpenAI } from "@langchain/openai";
import { SystemMessage } from "@langchain/core/messages";
import { RunnableConfig } from "@langchain/core/runnables";
import { StateGraph, Command, START, END } from "@langchain/langgraph";
import { ClientStateAnnotation, ClientState } from "@ag-kit/adapter-langgraph";
import { MemorySaver } from "@langchain/langgraph";
/**
 * Single-step chat node: binds any client-provided tools to the model,
 * invokes it with a system prompt plus the conversation history, and
 * routes to END with the model's reply appended to the message state.
 *
 * @param state  Graph state carrying the message history and optional client tools.
 * @param config Optional runnable config; defaults to `{ recursionLimit: 25 }`.
 * @returns A Command that goes to END and appends the response message.
 */
async function chatNode(state: ClientState, config?: RunnableConfig) {
  // NOTE(review): the model client is rebuilt on every invocation; fine for
  // a simple flow, but could be hoisted to module scope if this becomes hot.
  const model = new ChatOpenAI({
    // `||` is deliberate here: an empty-string env var should also fall back.
    model: process.env.OPENAI_MODEL || "gpt-4o-mini",
    apiKey: process.env.OPENAI_API_KEY,
    configuration: {
      baseURL: process.env.OPENAI_BASE_URL,
    },
  });

  // Derive the effective config rather than reassigning the parameter.
  const effectiveConfig: RunnableConfig = config ?? { recursionLimit: 25 };

  // `?? []` is the idiomatic nullish default; the original's spread copy of
  // the tools array was redundant — presumably bindTools does not mutate its
  // input (TODO confirm against @langchain/openai docs).
  const modelWithTools = model.bindTools(state.client?.tools ?? [], {
    // One tool call at a time keeps the flow deterministic and simple.
    parallel_tool_calls: false,
  });

  const systemMessage = new SystemMessage({
    content: "You are a helpful assistant.",
  });

  const response = await modelWithTools.invoke(
    [systemMessage, ...state.messages],
    effectiveConfig
  );

  return new Command({
    goto: END,
    update: {
      messages: [response],
    },
  });
}
// Wire the minimal single-node graph: START -> chat_node -> END.
// (The fluent chain is kept intact: each addNode/addEdge call narrows the
// graph's node-name type, so splitting it would lose type checking.)
const workflow = new StateGraph(ClientStateAnnotation)
  .addNode("chat_node", chatNode)
  .addEdge(START, "chat_node")
  .addEdge("chat_node", END);

// In-memory checkpointer — state survives across invocations within this
// process only; NOTE(review): swap for a persistent saver in production.
const checkpointer = new MemorySaver();

/** The compiled, ready-to-invoke agentic chat graph. */
export const agenticChatGraph = workflow.compile({ checkpointer });