
Record Token Usage after Streaming User Interfaces

When you're streaming user interfaces with streamUI, you may want to record the token usage for billing purposes.

onFinish Callback

You can use the onFinish callback to record token usage. It is called when the stream is finished.
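Before the full example, here is a minimal sketch of just the relevant part of a streamUI call: onFinish receives a usage object with promptTokens, completionTokens, and totalTokens, which you can pass to your own billing logic (the console.log call below is a placeholder for that logic, and the prompt is just an illustrative value).

import { streamUI } from 'ai/rsc';
import { openai } from '@ai-sdk/openai';

const result = await streamUI({
  model: openai('gpt-3.5-turbo'),
  prompt: 'Deploy vercel/ai-chatbot',
  text: ({ content }) => <div>{content}</div>,
  onFinish: ({ usage }) => {
    // usage is only available once the whole stream has completed
    const { promptTokens, completionTokens, totalTokens } = usage;
    console.log({ promptTokens, completionTokens, totalTokens });
  },
});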

Client

'use client';

import { useState } from 'react';
import { ClientMessage } from './actions';
import { useActions, useUIState } from 'ai/rsc';
import { generateId } from 'ai';

// Allow streaming responses up to 30 seconds
export const maxDuration = 30;

export default function Home() {
  const [input, setInput] = useState<string>('');
  const [conversation, setConversation] = useUIState();
  const { continueConversation } = useActions();

  return (
    <div>
      <div>
        {conversation.map((message: ClientMessage) => (
          <div key={message.id}>
            {message.role}: {message.display}
          </div>
        ))}
      </div>
      <div>
        <input
          type="text"
          value={input}
          onChange={event => {
            setInput(event.target.value);
          }}
        />
        <button
          onClick={async () => {
            setConversation((currentConversation: ClientMessage[]) => [
              ...currentConversation,
              { id: generateId(), role: 'user', display: input },
            ]);

            const message = await continueConversation(input);

            setConversation((currentConversation: ClientMessage[]) => [
              ...currentConversation,
              message,
            ]);
          }}
        >
          Send Message
        </button>
      </div>
    </div>
  );
}

Server

'use server';

import { createAI, getMutableAIState, streamUI } from 'ai/rsc';
import { openai } from '@ai-sdk/openai';
import { ReactNode } from 'react';
import { z } from 'zod';
import { generateId } from 'ai';

export interface ServerMessage {
  role: 'user' | 'assistant';
  content: string;
}

export interface ClientMessage {
  id: string;
  role: 'user' | 'assistant';
  display: ReactNode;
}

export async function continueConversation(
  input: string,
): Promise<ClientMessage> {
  'use server';

  const history = getMutableAIState();

  const result = await streamUI({
    model: openai('gpt-3.5-turbo'),
    messages: [...history.get(), { role: 'user', content: input }],
    text: ({ content, done }) => {
      if (done) {
        history.done((messages: ServerMessage[]) => [
          ...messages,
          { role: 'assistant', content },
        ]);
      }

      return <div>{content}</div>;
    },
    tools: {
      deploy: {
        description: 'Deploy repository to vercel',
        parameters: z.object({
          repositoryName: z
            .string()
            .describe('The name of the repository, example: vercel/ai-chatbot'),
        }),
        generate: async function* ({ repositoryName }) {
          yield <div>Cloning repository {repositoryName}...</div>;
          await new Promise(resolve => setTimeout(resolve, 3000));
          yield <div>Building repository {repositoryName}...</div>;
          await new Promise(resolve => setTimeout(resolve, 2000));
          return <div>{repositoryName} deployed!</div>;
        },
      },
    },
    onFinish: ({ usage }) => {
      const { promptTokens, completionTokens, totalTokens } = usage;
      // your own logic, e.g. for saving the chat history or recording usage
      console.log('Prompt tokens:', promptTokens);
      console.log('Completion tokens:', completionTokens);
      console.log('Total tokens:', totalTokens);
    },
  });

  return {
    id: generateId(),
    role: 'assistant',
    display: result.value,
  };
}
Finally, define the AI context in a separate file. It imports from the actions file above and registers the server action so the client can call it via useActions:

import { createAI } from 'ai/rsc';
import { ServerMessage, ClientMessage, continueConversation } from './actions';

export const AI = createAI<ServerMessage[], ClientMessage[]>({
  actions: {
    continueConversation,
  },
  initialAIState: [],
  initialUIState: [],
});
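In practice, you will usually want to persist the token counts rather than log them. Below is a sketch of what that could look like, assuming a hypothetical saveUsageRecord helper backed by your own database and illustrative per-token prices (not real provider pricing).

// Hypothetical shape of a billing record; adapt to your own schema.
interface UsageRecord {
  userId: string;
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
  costInUSD: number;
}

// Stand-in for writing to your database or metering service.
async function saveUsageRecord(record: UsageRecord): Promise<void> {
  console.log('usage record', record);
}

// Illustrative prices per 1K tokens; substitute your provider's actual rates.
const PROMPT_PRICE_PER_1K = 0.0005;
const COMPLETION_PRICE_PER_1K = 0.0015;

export async function recordUsage(
  userId: string,
  usage: {
    promptTokens: number;
    completionTokens: number;
    totalTokens: number;
  },
) {
  const costInUSD =
    (usage.promptTokens / 1000) * PROMPT_PRICE_PER_1K +
    (usage.completionTokens / 1000) * COMPLETION_PRICE_PER_1K;

  await saveUsageRecord({ userId, ...usage, costInUSD });
}

With a helper like this, the onFinish callback in continueConversation could call recordUsage with the usage object instead of (or in addition to) logging to the console, where the user id comes from your own authentication logic.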