Leverage OpenAI’s Realtime API with logging, cost tracking, and additional features!
OpenAI’s Realtime API, while being the fastest way to utilize multi-modal generation, comes with challenges around logging, cost tracking, and implementing guardrails.
Obiguard’s AI Gateway addresses these challenges through seamless integration. Its logging feature uniquely captures the complete request and response, including the model’s output, cost, and any guardrail violations.
import asyncio

from obiguard import AsyncObiguard as Obiguard, OBIGUARD_GATEWAY_URL


async def main():
    """Stream a text response from OpenAI's Realtime API through the Obiguard gateway."""
    client = Obiguard(
        virtual_key="vk-obg-***",  # Your Obiguard virtual key here
        base_url=OBIGUARD_GATEWAY_URL,
    )

    async with client.beta.realtime.connect(
        model="gpt-4o-realtime-preview-2024-10-01"  # Replace with the model you want to use
    ) as connection:
        # Restrict the session to text output only.
        await connection.session.update(session={'modalities': ['text']})

        # Queue a user message, then ask the model to respond to it.
        await connection.conversation.item.create(
            item={
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Say hello!"}],
            }
        )
        await connection.response.create()

        # Consume server events until the response is fully generated.
        async for event in connection:
            if event.type == 'response.text.delta':
                print(event.delta, flush=True, end="")
            elif event.type == 'response.text.done':
                print()
            elif event.type == "response.done":
                break


asyncio.run(main())
Copy
import asyncio

from obiguard import AsyncObiguard as Obiguard, OBIGUARD_GATEWAY_URL


async def main():
    """Stream a text response from OpenAI's Realtime API through the Obiguard gateway."""
    client = Obiguard(
        virtual_key="vk-obg-***",  # Your Obiguard virtual key here
        base_url=OBIGUARD_GATEWAY_URL,
    )

    async with client.beta.realtime.connect(
        model="gpt-4o-realtime-preview-2024-10-01"  # Replace with the model you want to use
    ) as connection:
        # Restrict the session to text output only.
        await connection.session.update(session={'modalities': ['text']})

        # Queue a user message, then ask the model to respond to it.
        await connection.conversation.item.create(
            item={
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Say hello!"}],
            }
        )
        await connection.response.create()

        # Consume server events until the response is fully generated.
        async for event in connection:
            if event.type == 'response.text.delta':
                print(event.delta, flush=True, end="")
            elif event.type == 'response.text.done':
                print()
            elif event.type == "response.done":
                break


asyncio.run(main())
Copy
import asyncio

from openai import AsyncOpenAI

from obiguard import createHeaders, OBIGUARD_GATEWAY_URL


async def main():
    """Use the OpenAI SDK directly, routing Realtime traffic through the Obiguard gateway."""
    # Obiguard routing headers: which provider to target and which virtual key to bill.
    headers = createHeaders(
        provider="openai",
        virtual_key="vk-obg-***",  # Your Obiguard virtual key here
    )

    # Point the OpenAI client at the Obiguard gateway instead of api.openai.com.
    client = AsyncOpenAI(
        base_url=OBIGUARD_GATEWAY_URL,
    )

    async with client.beta.realtime.connect(
        model="gpt-4o-realtime-preview",  # Replace with the model you want to use
        extra_headers=headers,
    ) as connection:
        # Restrict the session to text output only.
        await connection.session.update(session={'modalities': ['text']})

        # Queue a user message, then ask the model to respond to it.
        await connection.conversation.item.create(
            item={
                "type": "message",
                "role": "user",
                "content": [{"type": "input_text", "text": "Say hello!"}],
            }
        )
        await connection.response.create()

        # Consume server events until the response is fully generated.
        async for event in connection:
            if event.type == 'response.text.delta':
                print(event.delta, flush=True, end="")
            elif event.type == 'response.text.done':
                print()
            elif event.type == "response.done":
                break


asyncio.run(main())
Copy
# we're using websocat for this example, but you can use any websocket client
websocat "wss://gateway.obiguard.ai/v1/realtime?model=gpt-4o-realtime-preview-2024-10-01" \
  -H "x-obiguard-provider: openai" \
  -H "x-obiguard-api-key: sk-obg***" \
  -H "x-obiguard-OpenAI-Beta: realtime=v1"
# once connected, you can send your messages as you would with OpenAI's Realtime API
If you prefer not to store your API keys with Obiguard, you can include your OpenAI key in the Authorization header instead.