Skip to content

Commit 0d8547c

Browse files
authored
fix(vercelai): Fix input token count (#18574)
Ensures that the `gen_ai.usage.input_tokens` value includes the `gen_ai.usage.input_tokens.cached` count
1 parent 71384a2 commit 0d8547c

File tree

2 files changed

+48
-0
lines changed

2 files changed

+48
-0
lines changed

packages/core/src/tracing/vercel-ai/index.ts

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -119,6 +119,15 @@ function processEndedVercelAiSpan(span: SpanJSON): void {
119119
renameAttributeKey(attributes, AI_USAGE_PROMPT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE);
120120
renameAttributeKey(attributes, AI_USAGE_CACHED_INPUT_TOKENS_ATTRIBUTE, GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE);
121121

122+
// Input tokens is the sum of prompt tokens and cached input tokens
123+
if (
124+
typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number' &&
125+
typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE] === 'number'
126+
) {
127+
attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] =
128+
attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] + attributes[GEN_AI_USAGE_INPUT_TOKENS_CACHED_ATTRIBUTE];
129+
}
130+
122131
if (
123132
typeof attributes[GEN_AI_USAGE_OUTPUT_TOKENS_ATTRIBUTE] === 'number' &&
124133
typeof attributes[GEN_AI_USAGE_INPUT_TOKENS_ATTRIBUTE] === 'number'
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
import { describe, expect, it } from 'vitest';
2+
import { addVercelAiProcessors } from '../../../src/tracing/vercel-ai';
3+
import type { SpanJSON } from '../../../src/types-hoist/span';
4+
import { getDefaultTestClientOptions, TestClient } from '../../mocks/client';
5+
6+
describe('vercel-ai cached tokens', () => {
7+
it('should add cached input tokens to total input tokens', () => {
8+
const options = getDefaultTestClientOptions({ tracesSampleRate: 1.0 });
9+
const client = new TestClient(options);
10+
client.init();
11+
addVercelAiProcessors(client);
12+
13+
const mockSpan: SpanJSON = {
14+
description: 'test',
15+
span_id: 'test-span-id',
16+
trace_id: 'test-trace-id',
17+
start_timestamp: 1000,
18+
timestamp: 2000,
19+
origin: 'auto.vercelai.otel',
20+
data: {
21+
'ai.usage.promptTokens': 100,
22+
'ai.usage.cachedInputTokens': 50,
23+
},
24+
};
25+
26+
const event = {
27+
type: 'transaction' as const,
28+
spans: [mockSpan],
29+
};
30+
31+
const eventProcessor = client['_eventProcessors'].find(processor => processor.id === 'VercelAiEventProcessor');
32+
expect(eventProcessor).toBeDefined();
33+
34+
const processedEvent = eventProcessor!(event, {});
35+
36+
expect(processedEvent?.spans?.[0]?.data?.['gen_ai.usage.input_tokens']).toBe(150);
37+
expect(processedEvent?.spans?.[0]?.data?.['gen_ai.usage.input_tokens.cached']).toBe(50);
38+
});
39+
});

0 commit comments

Comments
 (0)