Integration
Overview
Floopy is compatible with the OpenAI SDK. To start routing requests through the gateway, change your baseURL to https://api.floopy.ai/v1 and use your Floopy API key. No additional SDK or library is required.
Quick Start
```typescript
import { OpenAI } from "openai";

const client = new OpenAI({
  baseURL: "https://api.floopy.ai/v1",
  apiKey: process.env.FLOOPY_API_KEY,
});

const response = await client.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Explain quantum computing in one sentence." }],
});

console.log(response.choices[0].message.content);
```

```python
from openai import OpenAI
import os

client = OpenAI(
    base_url="https://api.floopy.ai/v1",
    api_key=os.environ["FLOOPY_API_KEY"],
)

response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Explain quantum computing in one sentence."}],
)

print(response.choices[0].message.content)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Explain quantum computing in one sentence."}]
  }'
```

Session Tracking
Track conversations across multiple requests by passing a session ID in the floopy-session-id header. You can also set floopy-session-name and floopy-session-path for richer context. These group related requests together in the dashboard logs, making it easy to follow a full conversation flow.
```typescript
const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: {
      "floopy-session-id": "session_abc123",
      "floopy-session-name": "Onboarding Chat",
      "floopy-session-path": "/app/onboarding",
    },
  },
);
```

```python
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={
        "floopy-session-id": "session_abc123",
        "floopy-session-name": "Onboarding Chat",
        "floopy-session-path": "/app/onboarding",
    },
)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -H "floopy-session-id: session_abc123" \
  -H "floopy-session-name: Onboarding Chat" \
  -H "floopy-session-path: /app/onboarding" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```
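Because a session is identified purely by the header value, following a full conversation is just a matter of sending the same floopy-session-id on every turn. A minimal sketch, with an illustrative session ID and message flow:

```typescript
// Illustrative sketch: reuse the same session headers across turns of one conversation.
const sessionHeaders = {
  "floopy-session-id": "session_abc123",
  "floopy-session-name": "Onboarding Chat",
  "floopy-session-path": "/app/onboarding",
};

const messages: OpenAI.Chat.Completions.ChatCompletionMessageParam[] = [
  { role: "user", content: "Hello" },
];

// First turn
const first = await client.chat.completions.create(
  { model: "gpt-4o", messages },
  { headers: sessionHeaders },
);

// Carry the assistant reply forward and send the next turn under the same session
messages.push({ role: "assistant", content: first.choices[0].message.content ?? "" });
messages.push({ role: "user", content: "What should I do next?" });

const second = await client.chat.completions.create(
  { model: "gpt-4o", messages },
  { headers: sessionHeaders },
);
```

Both requests then appear grouped under the same session in the dashboard logs.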
Project Tracking
Segment requests by project by passing the floopy-project-id header. This tags the request with a specific project for per-project cost tracking, dashboards, and analytics. If your API key is hard-locked to a project, this header is optional; the locked project is used automatically.
```typescript
const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: {
      "floopy-project-id": "a1b2c3d4-5678-9abc-def0-123456789abc",
    },
  },
);
```

```python
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={
        "floopy-project-id": "a1b2c3d4-5678-9abc-def0-123456789abc",
    },
)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -H "floopy-project-id: a1b2c3d4-5678-9abc-def0-123456789abc" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```

```go
// Requires the "net/http", "os", and "strings" imports.
body := strings.NewReader(`{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}]}`)

req, _ := http.NewRequest("POST", "https://api.floopy.ai/v1/chat/completions", body)
req.Header.Set("Authorization", "Bearer "+os.Getenv("FLOOPY_API_KEY"))
req.Header.Set("Content-Type", "application/json")
req.Header.Set("floopy-project-id", "a1b2c3d4-5678-9abc-def0-123456789abc")

resp, err := http.DefaultClient.Do(req)
```

See the Projects feature guide for fallback chain details, per-project API keys, and environment model.
User Tracking
Use the floopy-user-id header (or the OpenAI user field) to associate requests with a specific end user. This appears in your dashboard logs and helps with per-user analytics and abuse detection.
```typescript
const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: {
      "floopy-user-id": "user_12345",
    },
  },
);
```

```python
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={
        "floopy-user-id": "user_12345",
    },
)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -H "floopy-user-id: user_12345" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```
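As noted above, the standard OpenAI user field in the request body can carry the same identifier instead of a header:

```typescript
// Uses the standard OpenAI `user` request field instead of the floopy-user-id header.
const response = await client.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Hello" }],
  user: "user_12345",
});
```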
Custom Properties
Attach arbitrary metadata to requests using individual floopy-property-* headers. Each header follows the pattern floopy-property-<name>: <value>. You can add as many properties as you need.
```typescript
const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: {
      "floopy-property-environment": "production",
      "floopy-property-feature": "chat-widget",
      "floopy-property-version": "2.1.0",
      "floopy-property-usertier": "premium",
    },
  },
);
```

```python
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={
        "floopy-property-environment": "production",
        "floopy-property-feature": "chat-widget",
        "floopy-property-version": "2.1.0",
        "floopy-property-usertier": "premium",
    },
)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -H "floopy-property-environment: production" \
  -H "floopy-property-feature: chat-widget" \
  -H "floopy-property-version: 2.1.0" \
  -H "floopy-property-usertier: premium" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```

These properties are searchable and filterable in the dashboard logs.
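If you attach the same metadata to many requests, a small helper that maps an object to floopy-property-* headers keeps call sites tidy. A minimal sketch; the helper name and property names are illustrative, not part of the Floopy API:

```typescript
// Illustrative helper: turn a metadata object into floopy-property-* headers.
function propertyHeaders(props: Record<string, string>): Record<string, string> {
  return Object.fromEntries(
    Object.entries(props).map(([name, value]) => [`floopy-property-${name}`, value]),
  );
}

const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: propertyHeaders({
      environment: "production",
      feature: "chat-widget",
      version: "2.1.0",
      usertier: "premium",
    }),
  },
);
```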
Switching Providers
Because Floopy translates all requests into a unified format, you can switch providers by changing the model name. No other code changes are needed:
```typescript
// Use OpenAI
const a = await client.chat.completions.create({
  model: "gpt-4o",
  messages,
});

// Use Anthropic
const b = await client.chat.completions.create({
  model: "claude-3-5-sonnet-20241022",
  messages,
});

// Use Google Gemini
const c = await client.chat.completions.create({
  model: "gemini-2.5-pro",
  messages,
});
```

```python
# Use OpenAI
a = client.chat.completions.create(
    model="gpt-4o",
    messages=messages,
)

# Use Anthropic
b = client.chat.completions.create(
    model="claude-3-5-sonnet-20241022",
    messages=messages,
)

# Use Google Gemini
c = client.chat.completions.create(
    model="gemini-2.5-pro",
    messages=messages,
)
```

```bash
# Use OpenAI
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}]}'

# Use Anthropic
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"model": "claude-3-5-sonnet-20241022", "messages": [{"role": "user", "content": "Hello"}]}'

# Use Google Gemini
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{"model": "gemini-2.5-pro", "messages": [{"role": "user", "content": "Hello"}]}'
```

Make sure the corresponding provider is configured in Settings > Providers. See the Providers guide for setup instructions.
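Since only the model name changes, one pattern is to drive it from configuration so a provider switch requires no code change at all. A minimal sketch; the MODEL environment variable is an illustrative convention, not a Floopy feature:

```typescript
// Illustrative sketch: pick the model from configuration rather than hard-coding it.
// MODEL is an assumed environment variable name, not something Floopy defines.
const model = process.env.MODEL ?? "gpt-4o";

const response = await client.chat.completions.create({
  model, // e.g. "claude-3-5-sonnet-20241022" or "gemini-2.5-pro"
  messages: [{ role: "user", content: "Hello" }],
});
```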
Model Override
Use the floopy-model-override header to override the model specified in the request body. This lets you change which model handles the request without modifying your application code.
```typescript
import { OpenAI } from "openai";

const client = new OpenAI({
  baseURL: "https://api.floopy.ai/v1",
  apiKey: process.env.FLOOPY_API_KEY,
});

// Request body says gpt-4o, but the gateway will use claude-3-5-sonnet-20241022
const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: {
      "floopy-model-override": "claude-3-5-sonnet-20241022",
    },
  },
);

console.log(response.choices[0].message.content);
```

```python
from openai import OpenAI
import os

client = OpenAI(
    base_url="https://api.floopy.ai/v1",
    api_key=os.environ["FLOOPY_API_KEY"],
)

# Request body says gpt-4o, but the gateway will use claude-3-5-sonnet-20241022
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={
        "floopy-model-override": "claude-3-5-sonnet-20241022",
    },
)

print(response.choices[0].message.content)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -H "floopy-model-override: claude-3-5-sonnet-20241022" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```

Routing Rule Override
Use the floopy-routing-rule header to override the default routing configuration for a request. This directs the request to a specific routing rule you have configured in the dashboard.
```typescript
const response = await client.chat.completions.create(
  {
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  },
  {
    headers: {
      "floopy-routing-rule": "low-latency-us-east",
    },
  },
);
```

```python
response = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
    extra_headers={
        "floopy-routing-rule": "low-latency-us-east",
    },
)
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -H "floopy-routing-rule: low-latency-us-east" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'
```

Response Headers
The gateway includes informational headers in every response. These tell you which provider and model handled the request, and whether a fallback was used.
| Header | Description |
|---|---|
| Floopy-Provider | The provider that handled the request (e.g. openai, anthropic, google) |
| Floopy-Model | The model that was used (e.g. gpt-4o, claude-3-5-sonnet-20241022) |
| Floopy-Fallback-Used | "true" if the primary provider failed and a fallback provider handled the request |
```typescript
const res = await fetch("https://api.floopy.ai/v1/chat/completions", {
  method: "POST",
  headers: {
    "Authorization": `Bearer ${process.env.FLOOPY_API_KEY}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-4o",
    messages: [{ role: "user", content: "Hello" }],
  }),
});

console.log("Provider:", res.headers.get("Floopy-Provider"));
console.log("Model:", res.headers.get("Floopy-Model"));
console.log("Fallback Used:", res.headers.get("Floopy-Fallback-Used"));

const data = await res.json();
console.log(data.choices[0].message.content);
```

```python
from openai import OpenAI
import os

client = OpenAI(
    base_url="https://api.floopy.ai/v1",
    api_key=os.environ["FLOOPY_API_KEY"],
)

response = client.chat.completions.with_raw_response.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Hello"}],
)

print("Provider:", response.headers.get("Floopy-Provider"))
print("Model:", response.headers.get("Floopy-Model"))
print("Fallback Used:", response.headers.get("Floopy-Fallback-Used"))

completion = response.parse()
print(completion.choices[0].message.content)
```

```bash
curl -i https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Hello"}]
  }'

# The -i flag prints response headers, including:
# Floopy-Provider: openai
# Floopy-Model: gpt-4o
# Floopy-Fallback-Used: false
```

Streaming
Floopy supports streaming responses. Use the stream parameter as you normally would:
```typescript
const stream = await client.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Write a short poem." }],
  stream: true,
});

for await (const chunk of stream) {
  process.stdout.write(chunk.choices[0]?.delta?.content || "");
}
```

```python
stream = client.chat.completions.create(
    model="gpt-4o",
    messages=[{"role": "user", "content": "Write a short poem."}],
    stream=True,
)

for chunk in stream:
    content = chunk.choices[0].delta.content
    if content:
        print(content, end="")
```

```bash
curl https://api.floopy.ai/v1/chat/completions \
  -H "Authorization: Bearer $FLOOPY_API_KEY" \
  -H "Content-Type: application/json" \
  -d '{
    "model": "gpt-4o",
    "messages": [{"role": "user", "content": "Write a short poem."}],
    "stream": true
  }'
```
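If you need the complete text after the stream finishes, for example to log or store the reply, one approach is to accumulate the deltas as they arrive. A minimal sketch:

```typescript
// Illustrative sketch: collect streamed deltas into the full completion text.
const stream = await client.chat.completions.create({
  model: "gpt-4o",
  messages: [{ role: "user", content: "Write a short poem." }],
  stream: true,
});

let fullText = "";
for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta?.content || "";
  fullText += delta;
  process.stdout.write(delta); // still show tokens as they arrive
}

// fullText now holds the entire response, e.g. for logging or persistence
console.log("\n---\nFull response length:", fullText.length);
```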