Basic Chat Completion
Simple chat completion with guardrails protection:
import OpenAI from 'openai';

const openai = new OpenAI({
  baseURL: 'https://gateway.oximy.com/v1',
  apiKey: 'your-openai-api-key',
  defaultHeaders: {
    'x-oximy-api-key': 'oxi-live-YOUR_API_KEY',
    'x-oximy-project-id': 'your-project-slug'
  }
});

// Use .withResponse() to get both the parsed completion and the raw HTTP response,
// since the guardrails metadata is returned in response headers
const { data: completion, response: raw } = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'My email is [email protected]. Help me write a professional email.' }
  ],
  max_tokens: 500,
  temperature: 0.7
}).withResponse();

console.log('AI Response:', completion.choices[0].message.content);
console.log('Violations detected:', raw.headers.get('x-oximy-violations'));
Streaming Response
Real-time streaming with guardrails processing:
const stream = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Tell me a story about AI security' }],
  stream: true
});

for await (const chunk of stream) {
  const content = chunk.choices[0]?.delta?.content;
  if (content) {
    process.stdout.write(content); // Already sanitized by guardrails
  }
}
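If you also want the full message once streaming finishes (for logging or storage), you can accumulate the deltas as they arrive. A minimal sketch reusing the client configured above; the variable names are illustrative:
const storyStream = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Tell me a story about AI security' }],
  stream: true
});

let fullText = '';
for await (const chunk of storyStream) {
  const content = chunk.choices[0]?.delta?.content;
  if (content) {
    process.stdout.write(content); // print as it streams
    fullText += content;           // keep the complete, sanitized text
  }
}
console.log('\nTotal characters received:', fullText.length);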
Using Different Providers
Switch between AI providers by specifying the provider in headers:
// Use Anthropic Claude
const claudeResponse = await openai.chat.completions.create({
  model: 'claude-3-5-sonnet-20241022',
  messages: [{ role: 'user', content: 'Hello Claude!' }]
}, {
  headers: { 'x-oximy-provider': 'anthropic' }
});

// Use Google Gemini
const geminiResponse = await openai.chat.completions.create({
  model: 'gemini-1.5-pro',
  messages: [{ role: 'user', content: 'Hello Gemini!' }]
}, {
  headers: { 'x-oximy-provider': 'google' }
});

console.log('Claude:', claudeResponse.choices[0].message.content);
console.log('Gemini:', geminiResponse.choices[0].message.content);
Multi-Provider Fallback
Implement fallback logic for reliability:
async function robustRequest(prompt, maxRetries = 3) {
  const providers = [
    { name: 'openai', model: 'gpt-4o' },
    { name: 'anthropic', model: 'claude-3-5-sonnet-20241022' },
    { name: 'google', model: 'gemini-1.5-pro' }
  ];

  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    for (const provider of providers) {
      try {
        const response = await openai.chat.completions.create({
          model: provider.model,
          messages: [{ role: 'user', content: prompt }]
        }, {
          headers: { 'x-oximy-provider': provider.name }
        });
        return response;
      } catch (error) {
        console.log(`Provider ${provider.name} failed, trying next...`);
        continue;
      }
    }

    if (attempt < maxRetries) {
      console.log(`Attempt ${attempt} failed, retrying...`);
      await new Promise(resolve => setTimeout(resolve, Math.pow(2, attempt) * 1000));
    }
  }

  throw new Error('All providers failed');
}

// Usage
try {
  const response = await robustRequest('What is AI security?');
  console.log(response.choices[0].message.content);
} catch (error) {
  console.error('Request failed:', error.message);
}
Checking Guardrails Violations
Monitor and respond to guardrails violations:
// Use .withResponse() to read the completion and the raw response headers
const { data: completion, response: raw } = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{
    role: 'user',
    content: 'My email is [email protected] and my API key is sk-1234567890abcdef'
  }]
}).withResponse();

// Check violation details (header values are strings)
const violations = parseInt(raw.headers.get('x-oximy-violations') ?? '0', 10);
const status = raw.headers.get('x-oximy-guardrails-status');

if (violations > 0) {
  console.log(`WARNING: ${violations} violations detected`);
  console.log(`Status: ${status}`);

  // Log for audit purposes
  console.log('Request contained sensitive data that was sanitized');
} else {
  console.log('SUCCESS: No violations detected');
}

console.log('AI Response:', completion.choices[0].message.content);
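If you check violations on many requests, the header handling can be factored into a small helper. A minimal sketch built on the same .withResponse() pattern; the wrapper name and return shape are illustrative, not part of the gateway or the OpenAI SDK:
// Illustrative helper: returns the completion plus parsed guardrails metadata
async function createWithGuardrailsInfo(params, options) {
  const { data: completion, response: raw } = await openai.chat.completions
    .create(params, options)
    .withResponse();

  return {
    completion,
    violations: parseInt(raw.headers.get('x-oximy-violations') ?? '0', 10),
    status: raw.headers.get('x-oximy-guardrails-status')
  };
}

// Usage
const { completion, violations, status } = await createWithGuardrailsInfo({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Summarize our security policy.' }]
});
console.log({ violations, status });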
Disabling Guardrails for Testing
Temporarily disable guardrails during development:
// Development/testing mode
const isDevelopment = process.env.NODE_ENV === 'development';

const { data: completion, response: raw } = await openai.chat.completions.create({
  model: 'gpt-4o',
  messages: [{ role: 'user', content: 'Test with sensitive data' }]
}, {
  headers: {
    'x-oximy-disable': isDevelopment ? 'true' : 'false'
  }
}).withResponse();

console.log('Guardrails disabled:', raw.headers.get('x-oximy-guardrails-status') === 'bypassed');
Error Handling
Comprehensive error handling with retry logic:
async function safeRequest(prompt, maxRetries = 3) {
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      const response = await openai.chat.completions.create({
        model: 'gpt-4o',
        messages: [{ role: 'user', content: prompt }]
      });
      return response;
    } catch (error) {
      console.error(`Attempt ${attempt} failed:`, error.message);

      if (error.status === 429) {
        // Rate limited - wait and retry
        const delay = Math.pow(2, attempt) * 1000;
        console.log(`Rate limited, waiting ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
        continue;
      } else if (error.status >= 500) {
        // Server error - retry
        if (attempt < maxRetries) {
          console.log('Server error, retrying...');
          continue;
        }
      }

      throw error;
    }
  }

  // Exhausted retries on rate limits without a successful response
  throw new Error(`Request failed after ${maxRetries} attempts`);
}

// Usage
try {
  const response = await safeRequest('What is Oximy?');
  console.log(response.choices[0].message.content);
} catch (error) {
  console.error('Request failed after all retries:', error.message);
}
Environment Configuration
Complete setup with environment variables:
# Oximy Gateway credentials
OXIMY_API_KEY=oxi-live-YOUR_API_KEY
OXIMY_PROJECT_ID=your-project-slug
# AI Provider credentials
OPENAI_API_KEY=sk-your-openai-key
ANTHROPIC_API_KEY=sk-ant-your-anthropic-key
GOOGLE_API_KEY=your-google-api-key
# Environment
NODE_ENV=production
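With those variables in place, the client can be configured from the environment instead of hard-coded strings. A minimal sketch, assuming the variables above are loaded into process.env (for example via dotenv or Node's --env-file flag):
import 'dotenv/config'; // assumes dotenv is installed; alternatively run node with --env-file=.env
import OpenAI from 'openai';

const openai = new OpenAI({
  baseURL: 'https://gateway.oximy.com/v1',
  apiKey: process.env.OPENAI_API_KEY,
  defaultHeaders: {
    'x-oximy-api-key': process.env.OXIMY_API_KEY,
    'x-oximy-project-id': process.env.OXIMY_PROJECT_ID
  }
});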