import { openai } from "@ai-sdk/openai";

// `agent` is an existing Mastra Agent instance; `prompt` holds the user's input.
const response = await agent.streamVNext(
  [
    {
      role: "user",
      content: prompt,
    },
  ],
  {
    prepareStep: async ({ messages, model }) => {
      // look at the last message to see whether the previous step ended in a tool call
      const lastMessage = messages[messages.length - 1];
      // if the last assistant message contains a tool call to updateWorkingMemory,
      // switch that step to a different model
      if (
        lastMessage.role === "assistant" &&
        Array.isArray(lastMessage.content)
      ) {
        const toolCall = lastMessage.content.find(
          (c) => c.type === "tool-call"
        );
        if (toolCall?.toolName === "updateWorkingMemory") {
          // route the working-memory update to a smaller, cheaper model
          return { model: openai("gpt-4o-mini") };
        }
      }
      // otherwise keep the model the step would normally use
      return { model };
    },
  }
);
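// Hedged usage sketch (an assumption, not shown in the snippet above): Mastra's
// streamVNext result exposes a `textStream` async iterable, so the response
// could be consumed like this, printing text chunks as they arrive.
for await (const chunk of response.textStream) {
  process.stdout.write(chunk);
}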