
Commit 7a742cd

adding turn configuration parameters
1 parent 89d9b65 commit 7a742cd

File tree

2 files changed: +129 -11 lines changed

src/inference-api.ts

Lines changed: 113 additions & 5 deletions
@@ -3,6 +3,44 @@ export interface WebRTCWorkerConfig {
   streamOutputNames?: string[];
   dataOutputNames?: string[];
   threadPoolWorkers?: number;
+  /**
+   * Workflow parameters to pass to the workflow execution
+   */
+  workflowsParameters?: Record<string, any>;
+  /**
+   * WebRTC configuration to pass to the server API
+   * This is passed to the /initialise_webrtc_worker endpoint's webrtc_turn_config field.
+   */
+  webrtcConfig?: {
+    iceServers?: RTCIceServerConfig[];
+  };
+  /**
+   * Processing timeout in seconds (serverless only)
+   * @default 600
+   */
+  processingTimeout?: number;
+  /**
+   * Requested compute plan (serverless only)
+   * @example "webrtc-gpu-small"
+   */
+  requestedPlan?: string;
+  /**
+   * Requested region for processing (serverless only)
+   * @example "us"
+   */
+  requestedRegion?: string;
+}
+
+/**
+ * ICE server configuration for WebRTC connections
+ *
+ * Use this to configure custom STUN/TURN servers for users behind
+ * symmetric NAT or restrictive firewalls.
+ */
+export interface RTCIceServerConfig {
+  urls: string | string[];
+  username?: string;
+  credential?: string;
 }
 
 export interface WebRTCOffer {
@@ -30,6 +68,49 @@ export interface WebRTCParams {
   streamOutputNames?: string[];
   dataOutputNames?: string[];
   threadPoolWorkers?: number;
+  /**
+   * Workflow parameters to pass to the workflow execution
+   */
+  workflowsParameters?: Record<string, any>;
+  /**
+   * Custom ICE servers for RTCPeerConnection (client-side)
+   *
+   * Use this to specify custom STUN/TURN servers for users behind
+   * symmetric NAT or restrictive firewalls.
+   *
+   * @example
+   * ```typescript
+   * iceServers: [
+   *   { urls: ["stun:stun.l.google.com:19302"] },
+   *   { urls: "turn:turn.example.com:3478", username: "user", credential: "pass" }
+   * ]
+   * ```
+   */
+  iceServers?: RTCIceServerConfig[];
+  /**
+   * WebRTC configuration to pass to the server API
+   *
+   * This is passed to the /initialise_webrtc_worker endpoint's webrtc_turn_config field.
+   * Use this when the server needs to know about your TURN configuration.
+   */
+  webrtcConfig?: {
+    iceServers?: RTCIceServerConfig[];
+  };
+  /**
+   * Processing timeout in seconds (serverless only)
+   * @default 600
+   */
+  processingTimeout?: number;
+  /**
+   * Requested compute plan (serverless only)
+   * @example "webrtc-gpu-small"
+   */
+  requestedPlan?: string;
+  /**
+   * Requested region for processing (serverless only)
+   * @example "us"
+   */
+  requestedRegion?: string;
 }
 
 export interface Connector {
@@ -115,13 +196,19 @@ export class InferenceHTTPClient {
       imageInputName = "image",
       streamOutputNames = [],
       dataOutputNames = ["string"],
-      threadPoolWorkers = 4
+      threadPoolWorkers = 4,
+      workflowsParameters = {},
+      webrtcConfig,
+      processingTimeout,
+      requestedPlan,
+      requestedRegion
     } = config;
 
     // Build workflow_configuration based on what's provided
     const workflowConfiguration: any = {
       type: "WorkflowConfiguration",
       image_input_name: imageInputName,
+      workflows_parameters: workflowsParameters,
       workflows_thread_pool_workers: threadPoolWorkers,
       cancel_thread_pool_tasks_on_exit: true,
       video_metadata_input_name: "video_metadata"
@@ -134,19 +221,30 @@ export class InferenceHTTPClient {
       workflowConfiguration.workflow_id = workflowId;
     }
 
-    const payload = {
+    const payload: Record<string, any> = {
       workflow_configuration: workflowConfiguration,
       api_key: this.apiKey,
       webrtc_realtime_processing: true,
       webrtc_offer: {
         sdp: offer.sdp,
         type: offer.type
       },
-      webrtc_turn_config: null,
+      webrtc_turn_config: webrtcConfig ?? null,
       stream_output: streamOutputNames,
       data_output: dataOutputNames
     };
 
+    // Add serverless-specific fields if provided
+    if (processingTimeout !== undefined) {
+      payload.processing_timeout = processingTimeout;
+    }
+    if (requestedPlan !== undefined) {
+      payload.requested_plan = requestedPlan;
+    }
+    if (requestedRegion !== undefined) {
+      payload.requested_region = requestedRegion;
+    }
+
     const response = await fetch(`${this.serverUrl}/initialise_webrtc_worker`, {
       method: "POST",
       headers: { "Content-Type": "application/json" },
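
For orientation, here is a rough sketch of the request body the client now posts to /initialise_webrtc_worker once the new fields are wired in. It is assembled from the payload construction above; the concrete values are placeholders, not captured output.

```typescript
// Illustrative only: the shape follows the payload built above, the values are placeholders.
const examplePayload = {
  workflow_configuration: { /* type, image_input_name, workflows_parameters, ... */ },
  api_key: "<api-key>",
  webrtc_realtime_processing: true,
  webrtc_offer: { sdp: "<offer-sdp>", type: "offer" },
  // The caller-supplied webrtcConfig is forwarded as webrtc_turn_config (null when omitted)
  webrtc_turn_config: {
    iceServers: [
      { urls: "turn:turn.example.com:3478", username: "user", credential: "pass" }
    ]
  },
  stream_output: [],
  data_output: ["string"],
  // Serverless-only fields, added only when the corresponding options are provided
  processing_timeout: 600,
  requested_plan: "webrtc-gpu-small",
  requested_region: "us"
};
```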
@@ -225,7 +323,12 @@ export const connectors = {
       imageInputName: wrtcParams.imageInputName,
       streamOutputNames: wrtcParams.streamOutputNames,
       dataOutputNames: wrtcParams.dataOutputNames,
-      threadPoolWorkers: wrtcParams.threadPoolWorkers
+      threadPoolWorkers: wrtcParams.threadPoolWorkers,
+      workflowsParameters: wrtcParams.workflowsParameters,
+      webrtcConfig: wrtcParams.webrtcConfig,
+      processingTimeout: wrtcParams.processingTimeout,
+      requestedPlan: wrtcParams.requestedPlan,
+      requestedRegion: wrtcParams.requestedRegion
     }
   });

@@ -271,7 +374,12 @@ export const connectors = {
 *       imageInputName: wrtcParams.imageInputName,
 *       streamOutputNames: wrtcParams.streamOutputNames,
 *       dataOutputNames: wrtcParams.dataOutputNames,
-*       threadPoolWorkers: wrtcParams.threadPoolWorkers
+*       threadPoolWorkers: wrtcParams.threadPoolWorkers,
+*       workflowsParameters: wrtcParams.workflowsParameters,
+*       webrtcConfig: wrtcParams.webrtcConfig,
+*       processingTimeout: wrtcParams.processingTimeout,
+*       requestedPlan: wrtcParams.requestedPlan,
+*       requestedRegion: wrtcParams.requestedRegion
 *     }
 *   });
 *   res.json(answer);
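
Putting the inference-api.ts changes together, here is a minimal sketch of how a caller might fill in the new WebRTCWorkerConfig fields. Only the config fields come from this commit; the client constructor arguments and the initialisation call that consumes the config are not shown in the diff and are assumptions here.

```typescript
import { InferenceHTTPClient, WebRTCWorkerConfig } from "./inference-api";

// Assumed constructor arguments; the constructor signature is not part of this diff.
const client = new InferenceHTTPClient(/* serverUrl, apiKey */);

const config: WebRTCWorkerConfig = {
  streamOutputNames: ["annotated_image"],      // illustrative output name
  dataOutputNames: ["string"],
  threadPoolWorkers: 4,
  workflowsParameters: { confidence: 0.5 },    // illustrative workflow parameter
  // Forwarded to the server as webrtc_turn_config
  webrtcConfig: {
    iceServers: [
      { urls: "turn:turn.example.com:3478", username: "user", credential: "pass" }
    ]
  },
  // Serverless-only options
  processingTimeout: 600,
  requestedPlan: "webrtc-gpu-small",
  requestedRegion: "us"
};
// config is then passed to the worker initialisation call (not shown in this diff).
```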

src/webrtc.ts

Lines changed: 16 additions & 6 deletions
@@ -1,5 +1,5 @@
 
-import { InferenceHTTPClient, Connector, WebRTCParams } from "./inference-api";
+import { InferenceHTTPClient, Connector, WebRTCParams, RTCIceServerConfig } from "./inference-api";
 import { stopStream } from "./streams";
 
 /**
@@ -141,16 +141,23 @@ function setupRemoteStreamListener(pc: RTCPeerConnection): Promise<MediaStream>
   });
 }
 
-async function preparePeerConnection(localStream: MediaStream): Promise<{
+const DEFAULT_ICE_SERVERS: RTCIceServerConfig[] = [
+  { urls: ["stun:stun.l.google.com:19302"] }
+];
+
+async function preparePeerConnection(
+  localStream: MediaStream,
+  customIceServers?: RTCIceServerConfig[]
+): Promise<{
   pc: RTCPeerConnection;
   offer: RTCSessionDescriptionInit;
   remoteStreamPromise: Promise<MediaStream>;
   dataChannel: RTCDataChannel;
 }> {
-  const stunServer = "stun:stun.l.google.com:19302";
+  const iceServers = customIceServers ?? DEFAULT_ICE_SERVERS;
 
   const pc = new RTCPeerConnection({
-    iceServers: [{ urls: [stunServer] }]
+    iceServers: iceServers as RTCIceServer[]
   });
 
   // Add transceiver for receiving remote video (BEFORE adding tracks - order matters!)
@@ -470,8 +477,11 @@ export async function useStream({
   // Step 1: Use provided media stream
   const localStream = source;
 
-  // Step 2: Prepare peer connection and create offer
-  const { pc, offer, remoteStreamPromise, dataChannel } = await preparePeerConnection(localStream);
+  // Step 2: Prepare peer connection and create offer (with custom ICE servers if provided)
+  const { pc, offer, remoteStreamPromise, dataChannel } = await preparePeerConnection(
+    localStream,
+    wrtcParams.iceServers
+  );
 
   // Step 3: Call connector.connectWrtc to exchange SDP and get answer
   const answer = await connector.connectWrtc(
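
On the browser side, here is a sketch of how custom ICE servers could be passed through wrtcParams into useStream. The ICE server entries mirror the @example in the WebRTCParams JSDoc; useStream's remaining options (connector, outputs, callbacks) are not shown in this diff, so they are elided here.

```typescript
import { useStream } from "./webrtc";

// Sketch only: besides `source` and `wrtcParams.iceServers`, useStream's option
// names are not shown in this diff, hence the elided fields and the `as any` cast.
const source = await navigator.mediaDevices.getUserMedia({ video: true, audio: false });

await useStream({
  source,
  wrtcParams: {
    // Used when building the RTCPeerConnection; falls back to the default
    // Google STUN server when omitted.
    iceServers: [
      { urls: ["stun:stun.l.google.com:19302"] },
      { urls: "turn:turn.example.com:3478", username: "user", credential: "pass" }
    ]
  }
  // ...plus the connector and any other options useStream already requires
} as any);
```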
