The OpenAI adapter provides access to OpenAI's models, including GPT-4o, GPT-5, image generation (DALL-E), text-to-speech (TTS), and audio transcription (Whisper).
npm install @tanstack/ai-openai

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Hello!" }],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Hello!" }],
});

import { chat } from "@tanstack/ai";
import { createOpenaiChat } from "@tanstack/ai-openai";
const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, {
// ... your config options
});
const stream = chat({
adapter: adapter("gpt-5.2"),
messages: [{ role: "user", content: "Hello!" }],
});

import { chat } from "@tanstack/ai";
import { createOpenaiChat } from "@tanstack/ai-openai";
const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, {
// ... your config options
});
const stream = chat({
adapter: adapter("gpt-5.2"),
messages: [{ role: "user", content: "Hello!" }],
});

import { createOpenaiChat, type OpenAIChatConfig } from "@tanstack/ai-openai";
const config: Omit<OpenAIChatConfig, 'apiKey'> = {
organization: "org-...", // Optional
baseURL: "https://api.openai.com/v1", // Optional, for custom endpoints
};
const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, config);

import { createOpenaiChat, type OpenAIChatConfig } from "@tanstack/ai-openai";
const config: Omit<OpenAIChatConfig, 'apiKey'> = {
organization: "org-...", // Optional
baseURL: "https://api.openai.com/v1", // Optional, for custom endpoints
};
const adapter = createOpenaiChat(process.env.OPENAI_API_KEY!, config);

import { chat, toServerSentEventsResponse } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages,
});
return toServerSentEventsResponse(stream);
}

import { chat, toServerSentEventsResponse } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
export async function POST(request: Request) {
const { messages } = await request.json();
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages,
});
return toServerSentEventsResponse(stream);
}

import { chat, toolDefinition } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { z } from "zod";
const getWeatherDef = toolDefinition({
name: "get_weather",
description: "Get the current weather",
inputSchema: z.object({
location: z.string(),
}),
});
const getWeather = getWeatherDef.server(async ({ location }) => {
// Fetch weather data
return { temperature: 72, conditions: "sunny" };
});
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages,
tools: [getWeather],
});

import { chat, toolDefinition } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { z } from "zod";
const getWeatherDef = toolDefinition({
name: "get_weather",
description: "Get the current weather",
inputSchema: z.object({
location: z.string(),
}),
});
const getWeather = getWeatherDef.server(async ({ location }) => {
// Fetch weather data
return { temperature: 72, conditions: "sunny" };
});
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages,
tools: [getWeather],
});

OpenAI supports various provider-specific options:
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages,
modelOptions: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
frequency_penalty: 0.5,
presence_penalty: 0.5,
stop: ["END"],
},
});

const stream = chat({
adapter: openaiText("gpt-5.2"),
messages,
modelOptions: {
temperature: 0.7,
max_tokens: 1000,
top_p: 0.9,
frequency_penalty: 0.5,
presence_penalty: 0.5,
stop: ["END"],
},
});

Enable reasoning for models that support it (e.g., GPT-5, O3). This allows the model to show its reasoning process, which is streamed as thinking chunks:
modelOptions: {
reasoning: {
effort: "medium", // "none" | "minimal" | "low" | "medium" | "high"
summary: "detailed", // "auto" | "detailed" (optional)
},
}

modelOptions: {
reasoning: {
effort: "medium", // "none" | "minimal" | "low" | "medium" | "high"
summary: "detailed", // "auto" | "detailed" (optional)
},
}

When reasoning is enabled, the model's reasoning process is streamed separately from the response text and appears as a collapsible thinking section in the UI.
Summarize long text content:
import { summarize } from "@tanstack/ai";
import { openaiSummarize } from "@tanstack/ai-openai";
const result = await summarize({
adapter: openaiSummarize("gpt-5-mini"),
text: "Your long text to summarize...",
maxLength: 100,
style: "concise", // "concise" | "bullet-points" | "paragraph"
});
console.log(result.summary);

import { summarize } from "@tanstack/ai";
import { openaiSummarize } from "@tanstack/ai-openai";
const result = await summarize({
adapter: openaiSummarize("gpt-5-mini"),
text: "Your long text to summarize...",
maxLength: 100,
style: "concise", // "concise" | "bullet-points" | "paragraph"
});
console.log(result.summary);

Generate images with DALL-E:
import { generateImage } from "@tanstack/ai";
import { openaiImage } from "@tanstack/ai-openai";
const result = await generateImage({
adapter: openaiImage("gpt-image-1"),
prompt: "A futuristic cityscape at sunset",
numberOfImages: 1,
size: "1024x1024",
});
console.log(result.images);

import { generateImage } from "@tanstack/ai";
import { openaiImage } from "@tanstack/ai-openai";
const result = await generateImage({
adapter: openaiImage("gpt-image-1"),
prompt: "A futuristic cityscape at sunset",
numberOfImages: 1,
size: "1024x1024",
});
console.log(result.images);

const result = await generateImage({
adapter: openaiImage("gpt-image-1"),
prompt: "...",
modelOptions: {
quality: "hd", // "standard" | "hd"
style: "natural", // "natural" | "vivid"
},
});

const result = await generateImage({
adapter: openaiImage("gpt-image-1"),
prompt: "...",
modelOptions: {
quality: "hd", // "standard" | "hd"
style: "natural", // "natural" | "vivid"
},
});

Generate speech from text:
import { generateSpeech } from "@tanstack/ai";
import { openaiTTS } from "@tanstack/ai-openai";
const result = await generateSpeech({
adapter: openaiTTS("tts-1"),
text: "Hello, welcome to TanStack AI!",
voice: "alloy",
format: "mp3",
});
// result.audio contains base64-encoded audio
console.log(result.format); // "mp3"

import { generateSpeech } from "@tanstack/ai";
import { openaiTTS } from "@tanstack/ai-openai";
const result = await generateSpeech({
adapter: openaiTTS("tts-1"),
text: "Hello, welcome to TanStack AI!",
voice: "alloy",
format: "mp3",
});
// result.audio contains base64-encoded audio
console.log(result.format); // "mp3"

Available voices: alloy, echo, fable, onyx, nova, shimmer, ash, ballad, coral, sage, verse
const result = await generateSpeech({
adapter: openaiTTS("tts-1-hd"),
text: "High quality speech",
modelOptions: {
speed: 1.0, // 0.25 to 4.0
},
});

const result = await generateSpeech({
adapter: openaiTTS("tts-1-hd"),
text: "High quality speech",
modelOptions: {
speed: 1.0, // 0.25 to 4.0
},
});

Transcribe audio to text:
import { generateTranscription } from "@tanstack/ai";
import { openaiTranscription } from "@tanstack/ai-openai";
const result = await generateTranscription({
adapter: openaiTranscription("whisper-1"),
audio: audioFile, // File object or base64 string
language: "en",
});
console.log(result.text); // Transcribed text

import { generateTranscription } from "@tanstack/ai";
import { openaiTranscription } from "@tanstack/ai-openai";
const result = await generateTranscription({
adapter: openaiTranscription("whisper-1"),
audio: audioFile, // File object or base64 string
language: "en",
});
console.log(result.text); // Transcribed text

const result = await generateTranscription({
adapter: openaiTranscription("whisper-1"),
audio: audioFile,
modelOptions: {
response_format: "verbose_json", // Get timestamps
temperature: 0,
prompt: "Technical terms: API, SDK",
},
});
// Access segments with timestamps
console.log(result.segments);

const result = await generateTranscription({
adapter: openaiTranscription("whisper-1"),
audio: audioFile,
modelOptions: {
response_format: "verbose_json", // Get timestamps
temperature: 0,
prompt: "Technical terms: API, SDK",
},
});
// Access segments with timestamps
console.log(result.segments);

Set your API key in environment variables:
OPENAI_API_KEY=sk-...

Creates an OpenAI chat adapter using environment variables.
Returns: An OpenAI chat adapter instance.
Creates an OpenAI chat adapter with an explicit API key.
Parameters:
Returns: An OpenAI chat adapter instance.
Creates an OpenAI summarization adapter using environment variables.
Returns: An OpenAI summarize adapter instance.
Creates an OpenAI summarization adapter with an explicit API key.
Returns: An OpenAI summarize adapter instance.
Creates an OpenAI image generation adapter using environment variables.
Returns: An OpenAI image adapter instance.
Creates an OpenAI image generation adapter with an explicit API key.
Returns: An OpenAI image adapter instance.
Creates an OpenAI TTS adapter using environment variables.
Returns: An OpenAI TTS adapter instance.
Creates an OpenAI TTS adapter with an explicit API key.
Returns: An OpenAI TTS adapter instance.
Creates an OpenAI transcription adapter using environment variables.
Returns: An OpenAI transcription adapter instance.
Creates an OpenAI transcription adapter with an explicit API key.
Returns: An OpenAI transcription adapter instance.
OpenAI exposes several native tools beyond user-defined function calls. Import them from @tanstack/ai-openai/tools and pass them into chat({ tools: [...] }).
For the full concept, a comparison matrix, and type-gating details, see Provider Tools.
Enables the model to run a web search and return grounded results with citations. Pass a WebSearchToolConfig object (typed from the OpenAI SDK) to configure the tool.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { webSearchTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "What's new in AI this week?" }],
tools: [webSearchTool({ type: "web_search" })],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { webSearchTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "What's new in AI this week?" }],
tools: [webSearchTool({ type: "web_search" })],
});

Supported models: GPT-4o, GPT-5, and Responses API-capable models. See Provider Tools.
The preview variant of web search with additional options for controlling search context size and user location. Use this when you want fine-grained control over the search context sent to the model.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { webSearchPreviewTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Latest news about TypeScript" }],
tools: [
webSearchPreviewTool({
type: "web_search_preview_2025_03_11",
search_context_size: "high",
}),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { webSearchPreviewTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Latest news about TypeScript" }],
tools: [
webSearchPreviewTool({
type: "web_search_preview_2025_03_11",
search_context_size: "high",
}),
],
});

Supported models: GPT-4o and above. See Provider Tools.
Searches OpenAI vector stores that you have pre-populated, letting the model retrieve relevant document chunks. Provide the vector_store_ids to search and optionally limit results with max_num_results (1–50).
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { fileSearchTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "What does the handbook say about PTO?" }],
tools: [
fileSearchTool({
type: "file_search",
vector_store_ids: ["vs_abc123"],
max_num_results: 5,
}),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { fileSearchTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "What does the handbook say about PTO?" }],
tools: [
fileSearchTool({
type: "file_search",
vector_store_ids: ["vs_abc123"],
max_num_results: 5,
}),
],
});

Supported models: GPT-4o and above. See Provider Tools.
Allows the model to generate images inline during a conversation using DALL-E/GPT-Image. Pass quality, size, and style options via the config object.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { imageGenerationTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Draw a logo for my app" }],
tools: [
imageGenerationTool({
quality: "high",
size: "1024x1024",
}),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { imageGenerationTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Draw a logo for my app" }],
tools: [
imageGenerationTool({
quality: "high",
size: "1024x1024",
}),
],
});

Supported models: GPT-5 and GPT-Image-capable models. See Provider Tools.
Gives the model a sandboxed Python execution environment. The container field configures the execution environment; pass the full CodeInterpreterToolConfig object.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { codeInterpreterTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Analyse this CSV and plot a chart" }],
tools: [
codeInterpreterTool({ type: "code_interpreter", container: { type: "auto" } }),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { codeInterpreterTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Analyse this CSV and plot a chart" }],
tools: [
codeInterpreterTool({ type: "code_interpreter", container: { type: "auto" } }),
],
});

Supported models: GPT-4o and above. See Provider Tools.
Connects the model to a remote MCP (Model Context Protocol) server, exposing all its capabilities as callable tools. Provide either server_url or connector_id — not both.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { mcpTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "List my GitHub issues" }],
tools: [
mcpTool({
server_url: "https://mcp.example.com",
server_label: "github",
}),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { mcpTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "List my GitHub issues" }],
tools: [
mcpTool({
server_url: "https://mcp.example.com",
server_label: "github",
}),
],
});

Supported models: GPT-4o and above. See Provider Tools.
Lets the model observe a virtual desktop via screenshots and interact with it using keyboard and mouse events. Provide the display dimensions and the execution environment type.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { computerUseTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("computer-use-preview"),
messages: [{ role: "user", content: "Open Chrome and navigate to example.com" }],
tools: [
computerUseTool({
type: "computer_use_preview",
display_width: 1024,
display_height: 768,
environment: "browser",
}),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { computerUseTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("computer-use-preview"),
messages: [{ role: "user", content: "Open Chrome and navigate to example.com" }],
tools: [
computerUseTool({
type: "computer_use_preview",
display_width: 1024,
display_height: 768,
environment: "browser",
}),
],
});

Supported models: computer-use-preview. See Provider Tools.
Provides the model with a local shell for executing system commands. Takes no arguments — the tool is enabled simply by including it in the tools array.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { localShellTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Run the test suite and summarise failures" }],
tools: [localShellTool()],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { localShellTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Run the test suite and summarise failures" }],
tools: [localShellTool()],
});

Supported models: GPT-5.x and other agent-capable models. See Provider Tools.
A function-style shell tool that exposes shell execution as a structured function call. Takes no arguments.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { shellTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Count lines in all JS files" }],
tools: [shellTool()],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { shellTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Count lines in all JS files" }],
tools: [shellTool()],
});

Supported models: GPT-5.x and other agent-capable models. See Provider Tools.
Lets the model apply unified-diff patches to modify files directly. Takes no arguments — include it in the tools array to enable patch application.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { applyPatchTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Fix the import paths in src/index.ts" }],
tools: [applyPatchTool()],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { applyPatchTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Fix the import paths in src/index.ts" }],
tools: [applyPatchTool()],
});

Supported models: GPT-5.x and other agent-capable models. See Provider Tools.
Defines a custom Responses API tool with an explicit name, description, and format. Use this when none of the structured tool types fits your use case. Unlike branded provider tools, customTool returns a plain Tool and is accepted by any chat model.
import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { customTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Look up order #1234" }],
tools: [
customTool({
type: "custom",
name: "lookup_order",
description: "Look up the status of a customer order by order ID",
}),
],
});

import { chat } from "@tanstack/ai";
import { openaiText } from "@tanstack/ai-openai";
import { customTool } from "@tanstack/ai-openai/tools";
const stream = chat({
adapter: openaiText("gpt-5.2"),
messages: [{ role: "user", content: "Look up order #1234" }],
tools: [
customTool({
type: "custom",
name: "lookup_order",
description: "Look up the status of a customer order by order ID",
}),
],
});

Supported models: all Responses API models. See Provider Tools.