Skip to content

Commit 9c2e6f0

Browse files
authored
Merge pull request #4 from roboflow/adding-turn-config-parameters
adding turn configuration parameters
2 parents 89d9b65 + 7492c7f commit 9c2e6f0

File tree

3 files changed

+139
-12
lines changed

3 files changed

+139
-12
lines changed

.github/workflows/test.yml

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,19 @@
1+
name: Test
2+
3+
on:
4+
push:
5+
branches: [master, main]
6+
pull_request:
7+
branches: [master, main]
8+
9+
jobs:
10+
test:
11+
runs-on: ubuntu-latest
12+
steps:
13+
- uses: actions/checkout@v4
14+
- uses: actions/setup-node@v4
15+
with:
16+
node-version: '20'
17+
cache: 'npm'
18+
- run: npm ci
19+
- run: npm run test

src/inference-api.ts

Lines changed: 104 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,41 @@ export interface WebRTCWorkerConfig {
33
streamOutputNames?: string[];
44
dataOutputNames?: string[];
55
threadPoolWorkers?: number;
6+
/**
7+
* Workflow parameters to pass to the workflow execution
8+
*/
9+
workflowsParameters?: Record<string, any>;
10+
/**
11+
* ICE servers for WebRTC connections (used for both client and server)
12+
*/
13+
iceServers?: RTCIceServerConfig[];
14+
/**
15+
* Processing timeout in seconds (serverless only)
16+
* @default 600
17+
*/
18+
processingTimeout?: number;
19+
/**
20+
* Requested compute plan (serverless only)
21+
* @example "webrtc-gpu-small"
22+
*/
23+
requestedPlan?: string;
24+
/**
25+
* Requested region for processing (serverless only)
26+
* @example "us"
27+
*/
28+
requestedRegion?: string;
29+
}
30+
31+
/**
32+
* ICE server configuration for WebRTC connections
33+
*
34+
* Use this to configure custom STUN/TURN servers for users behind
35+
* symmetric NAT or restrictive firewalls.
36+
*/
37+
export interface RTCIceServerConfig {
38+
urls: string[];
39+
username?: string;
40+
credential?: string;
641
}
742

843
export interface WebRTCOffer {
@@ -30,6 +65,42 @@ export interface WebRTCParams {
3065
streamOutputNames?: string[];
3166
dataOutputNames?: string[];
3267
threadPoolWorkers?: number;
68+
/**
69+
* Workflow parameters to pass to the workflow execution
70+
*/
71+
workflowsParameters?: Record<string, any>;
72+
/**
73+
* ICE servers for WebRTC connections (used for both client and server)
74+
*
75+
* Use this to specify custom STUN/TURN servers for users behind
76+
* symmetric NAT or restrictive firewalls. The same configuration is
77+
* used for both the client-side RTCPeerConnection and sent to the
78+
* server via webrtc_config.
79+
*
80+
* @example
81+
* ```typescript
82+
* iceServers: [
83+
* { urls: ["stun:stun.l.google.com:19302"] },
84+
* { urls: ["turn:turn.example.com:3478"], username: "user", credential: "pass" }
85+
* ]
86+
* ```
87+
*/
88+
iceServers?: RTCIceServerConfig[];
89+
/**
90+
* Processing timeout in seconds (serverless only)
91+
* @default 600
92+
*/
93+
processingTimeout?: number;
94+
/**
95+
* Requested compute plan (serverless only)
96+
* @example "webrtc-gpu-small"
97+
*/
98+
requestedPlan?: string;
99+
/**
100+
* Requested region for processing (serverless only)
101+
* @example "us"
102+
*/
103+
requestedRegion?: string;
33104
}
34105

35106
export interface Connector {
@@ -115,13 +186,19 @@ export class InferenceHTTPClient {
115186
imageInputName = "image",
116187
streamOutputNames = [],
117188
dataOutputNames = ["string"],
118-
threadPoolWorkers = 4
189+
threadPoolWorkers = 4,
190+
workflowsParameters = {},
191+
iceServers,
192+
processingTimeout,
193+
requestedPlan,
194+
requestedRegion
119195
} = config;
120196

121197
// Build workflow_configuration based on what's provided
122198
const workflowConfiguration: any = {
123199
type: "WorkflowConfiguration",
124200
image_input_name: imageInputName,
201+
workflows_parameters: workflowsParameters,
125202
workflows_thread_pool_workers: threadPoolWorkers,
126203
cancel_thread_pool_tasks_on_exit: true,
127204
video_metadata_input_name: "video_metadata"
@@ -134,19 +211,30 @@ export class InferenceHTTPClient {
134211
workflowConfiguration.workflow_id = workflowId;
135212
}
136213

137-
const payload = {
214+
const payload: Record<string, any> = {
138215
workflow_configuration: workflowConfiguration,
139216
api_key: this.apiKey,
140217
webrtc_realtime_processing: true,
141218
webrtc_offer: {
142219
sdp: offer.sdp,
143220
type: offer.type
144221
},
145-
webrtc_turn_config: null,
222+
webrtc_config: iceServers ? { iceServers } : null,
146223
stream_output: streamOutputNames,
147224
data_output: dataOutputNames
148225
};
149226

227+
// Add serverless-specific fields if provided
228+
if (processingTimeout !== undefined) {
229+
payload.processing_timeout = processingTimeout;
230+
}
231+
if (requestedPlan !== undefined) {
232+
payload.requested_plan = requestedPlan;
233+
}
234+
if (requestedRegion !== undefined) {
235+
payload.requested_region = requestedRegion;
236+
}
237+
console.trace("payload", payload);
150238
const response = await fetch(`${this.serverUrl}/initialise_webrtc_worker`, {
151239
method: "POST",
152240
headers: { "Content-Type": "application/json" },
@@ -215,7 +303,7 @@ export const connectors = {
215303
return {
216304
connectWrtc: async (offer: WebRTCOffer, wrtcParams: WebRTCParams): Promise<WebRTCWorkerResponse> => {
217305
const client = InferenceHTTPClient.init({ apiKey, serverUrl });
218-
306+
console.log("wrtcParams", wrtcParams);
219307
const answer = await client.initializeWebrtcWorker({
220308
offer,
221309
workflowSpec: wrtcParams.workflowSpec,
@@ -225,7 +313,12 @@ export const connectors = {
225313
imageInputName: wrtcParams.imageInputName,
226314
streamOutputNames: wrtcParams.streamOutputNames,
227315
dataOutputNames: wrtcParams.dataOutputNames,
228-
threadPoolWorkers: wrtcParams.threadPoolWorkers
316+
threadPoolWorkers: wrtcParams.threadPoolWorkers,
317+
workflowsParameters: wrtcParams.workflowsParameters,
318+
iceServers: wrtcParams.iceServers,
319+
processingTimeout: wrtcParams.processingTimeout,
320+
requestedPlan: wrtcParams.requestedPlan,
321+
requestedRegion: wrtcParams.requestedRegion
229322
}
230323
});
231324

@@ -271,7 +364,12 @@ export const connectors = {
271364
* imageInputName: wrtcParams.imageInputName,
272365
* streamOutputNames: wrtcParams.streamOutputNames,
273366
* dataOutputNames: wrtcParams.dataOutputNames,
274-
* threadPoolWorkers: wrtcParams.threadPoolWorkers
367+
* threadPoolWorkers: wrtcParams.threadPoolWorkers,
368+
* workflowsParameters: wrtcParams.workflowsParameters,
369+
* iceServers: wrtcParams.iceServers,
370+
* processingTimeout: wrtcParams.processingTimeout,
371+
* requestedPlan: wrtcParams.requestedPlan,
372+
* requestedRegion: wrtcParams.requestedRegion
275373
* }
276374
* });
277375
* res.json(answer);

src/webrtc.ts

Lines changed: 16 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11

2-
import { InferenceHTTPClient, Connector, WebRTCParams } from "./inference-api";
2+
import { InferenceHTTPClient, Connector, WebRTCParams, RTCIceServerConfig } from "./inference-api";
33
import { stopStream } from "./streams";
44

55
/**
@@ -141,16 +141,23 @@ function setupRemoteStreamListener(pc: RTCPeerConnection): Promise<MediaStream>
141141
});
142142
}
143143

144-
async function preparePeerConnection(localStream: MediaStream): Promise<{
144+
const DEFAULT_ICE_SERVERS: RTCIceServerConfig[] = [
145+
{ urls: ["stun:stun.l.google.com:19302"] }
146+
];
147+
148+
async function preparePeerConnection(
149+
localStream: MediaStream,
150+
customIceServers?: RTCIceServerConfig[]
151+
): Promise<{
145152
pc: RTCPeerConnection;
146153
offer: RTCSessionDescriptionInit;
147154
remoteStreamPromise: Promise<MediaStream>;
148155
dataChannel: RTCDataChannel;
149156
}> {
150-
const stunServer = "stun:stun.l.google.com:19302";
157+
const iceServers = customIceServers ?? DEFAULT_ICE_SERVERS;
151158

152159
const pc = new RTCPeerConnection({
153-
iceServers: [{ urls: [stunServer] }]
160+
iceServers: iceServers as RTCIceServer[]
154161
});
155162

156163
// Add transceiver for receiving remote video (BEFORE adding tracks - order matters!)
@@ -470,8 +477,11 @@ export async function useStream({
470477
// Step 1: Use provided media stream
471478
const localStream = source;
472479

473-
// Step 2: Prepare peer connection and create offer
474-
const { pc, offer, remoteStreamPromise, dataChannel } = await preparePeerConnection(localStream);
480+
// Step 2: Prepare peer connection and create offer (with custom ICE servers if provided)
481+
const { pc, offer, remoteStreamPromise, dataChannel } = await preparePeerConnection(
482+
localStream,
483+
wrtcParams.iceServers
484+
);
475485

476486
// Step 3: Call connector.connectWrtc to exchange SDP and get answer
477487
const answer = await connector.connectWrtc(

0 commit comments

Comments (0)