The response object returned from an LLM chat completion request.

/**
 * Shape of the response returned from an LLM chat completion request.
 */
interface ChatResponse {
    /** Generated message text. Always present (empty string rather than absent). */
    content: string;
    /**
     * Tool invocations requested by the model; omitted when the model made
     * no tool calls. NOTE(review): presumably populated exactly when
     * `finishReason === "tool_calls"` — confirm against the producer.
     */
    toolCalls?: ToolCall[];
    /** Token accounting for this completion — see `TokenUsage` for the breakdown. */
    usage: TokenUsage;
    /**
     * Why generation stopped:
     * - "stop"           — natural end of the message / stop sequence hit
     * - "length"         — token limit reached (output may be truncated)
     * - "tool_calls"     — model requested tool invocations
     * - "content_filter" — output withheld by a content filter
     * NOTE(review): value semantics mirror OpenAI-style `finish_reason`
     * conventions — confirm with the backing provider.
     */
    finishReason: "length" | "stop" | "tool_calls" | "content_filter";
}

Properties

content: string
toolCalls?: ToolCall[]
usage: TokenUsage
finishReason: "length" | "stop" | "tool_calls" | "content_filter"