import asyncio
import json
import base64
import websockets
async def test_multi_context():
    """Exercise the multi-context TTS websocket endpoint.

    Opens a single websocket, starts two independent synthesis contexts
    (each with its own voice), collects their interleaved base64 audio
    chunks, streams additional text into an existing context, then closes
    the context and the socket and writes each context's audio to an MP3
    file named ``output_<context_id>.mp3``.

    Side effects: network I/O and one file written per context.
    """
    url = "wss://dev.voice.ai/api/v1/tts/multi-stream"
    headers = {
        "Authorization": "Bearer YOUR_API_KEY",
    }
    # Accumulated audio per context: {context_id: bytes}
    audio_by_context = {}

    async with websockets.connect(url, additional_headers=headers) as ws:

        async def drain(expected_contexts):
            """Receive messages until every context in *expected_contexts*
            has sent its ``is_last`` completion marker.

            Audio chunks arrive as JSON with base64 audio and are appended
            to ``audio_by_context``; ``is_last`` is sent as a separate
            message after all chunks for a context.
            """
            pending = set(expected_contexts)
            while pending:
                data = json.loads(await ws.recv())
                ctx = data.get("context_id")
                if data.get("audio"):
                    chunk = base64.b64decode(data["audio"])
                    audio_by_context[ctx] = audio_by_context.get(ctx, b"") + chunk
                    print(f"Audio chunk for {ctx}: {len(chunk)} bytes")
                elif data.get("is_last"):
                    pending.discard(ctx)
                    print(f"{ctx} complete!")

        # Init context-1 (first message to ctx-1)
        await ws.send(json.dumps({
            "context_id": "ctx-1",
            "voice_id": "VOICE_ID_1",
            "text": "Hello from context one.",
            "language": "en",
            "model": "voiceai-tts-v1-latest",
            "flush": True,
        }))
        # Init context-2 (first message to ctx-2, can use different voice)
        await ws.send(json.dumps({
            "context_id": "ctx-2",
            "voice_id": "VOICE_ID_2",
            "text": "Hello from context two.",
            "language": "en",
            "model": "voiceai-tts-v1-latest",
            "flush": True,
        }))
        # Responses for the two contexts are interleaved on the one socket.
        await drain({"ctx-1", "ctx-2"})

        # Send text-only message to an existing context (voice/model are
        # remembered from the context's first message).
        await ws.send(json.dumps({
            "context_id": "ctx-1",
            "text": "More text for context one.",
            "flush": True,
        }))
        # BUGFIX: the original closed the context/socket without ever
        # receiving the audio produced by the follow-up text, losing it.
        await drain({"ctx-1"})

        # Close a specific context
        await ws.send(json.dumps({
            "context_id": "ctx-1",
            "close_context": True,
        }))
        # Close entire socket
        await ws.send(json.dumps({"close_socket": True}))

    # Save each context's audio to its own file.
    for ctx_id, audio_data in audio_by_context.items():
        filename = f"output_{ctx_id}.mp3"
        with open(filename, "wb") as f:
            f.write(audio_data)
        # BUGFIX: original printed the literal text "(unknown)" instead
        # of the filename it had just written.
        print(f"Saved {ctx_id} audio to {filename} ({len(audio_data)} bytes)")
asyncio.run(test_multi_context())