Skip to content

Commit 6d5bb1b

Browse files
authored
Merge pull request QwenLM#284 from QwenLM/feat/usage_stats_logging
feat: add usage statistics logging for Qwen integration
2 parents: fb9f2d2 + 2655af0 · commit 6d5bb1b

File tree

16 files changed

+673
-128
lines changed

16 files changed

+673
-128
lines changed

docs/cli/configuration.md

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -523,3 +523,5 @@ You can opt out of usage statistics collection at any time by setting the `usage
523523
"usageStatisticsEnabled": false
524524
}
525525
```
526+
527+
Note: When usage statistics are enabled, events are sent to an Alibaba Cloud RUM collection endpoint.

packages/core/src/config/config.ts

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -43,7 +43,7 @@ import {
4343
DEFAULT_GEMINI_EMBEDDING_MODEL,
4444
DEFAULT_GEMINI_FLASH_MODEL,
4545
} from './models.js';
46-
import { ClearcutLogger } from '../telemetry/clearcut-logger/clearcut-logger.js';
46+
import { QwenLogger } from '../telemetry/qwen-logger/qwen-logger.js';
4747
import { shouldAttemptBrowserLaunch } from '../utils/browser.js';
4848
import { MCPOAuthConfig } from '../mcp/oauth-provider.js';
4949
import { IdeClient } from '../ide/ide-client.js';
@@ -360,7 +360,7 @@ export class Config {
360360
}
361361

362362
if (this.getUsageStatisticsEnabled()) {
363-
ClearcutLogger.getInstance(this)?.logStartSessionEvent(
363+
QwenLogger.getInstance(this)?.logStartSessionEvent(
364364
new StartSessionEvent(this),
365365
);
366366
} else {

packages/core/src/core/__tests__/openaiTimeoutHandling.test.ts

Lines changed: 5 additions & 14 deletions
Original file line number | Diff line number | Diff line change
@@ -15,6 +15,7 @@ vi.mock('openai');
1515
// Mock logger modules
1616
vi.mock('../../telemetry/loggers.js', () => ({
1717
logApiResponse: vi.fn(),
18+
logApiError: vi.fn(),
1819
}));
1920

2021
vi.mock('../../utils/openaiLogger.js', () => ({
@@ -290,28 +291,18 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
290291
});
291292

292293
describe('token estimation on timeout', () => {
293-
it('should estimate tokens even when request times out', async () => {
294+
it('should surface a clear timeout error when request times out', async () => {
294295
const timeoutError = new Error('Request timeout');
295296
mockOpenAIClient.chat.completions.create.mockRejectedValue(timeoutError);
296297

297-
// Mock countTokens to return a value
298-
const mockCountTokens = vi.spyOn(generator, 'countTokens');
299-
mockCountTokens.mockResolvedValue({ totalTokens: 100 });
300-
301298
const request = {
302299
contents: [{ role: 'user' as const, parts: [{ text: 'Hello world' }] }],
303300
model: 'gpt-4',
304301
};
305302

306-
try {
307-
await generator.generateContent(request, 'test-prompt-id');
308-
} catch (_error) {
309-
// Verify that countTokens was called for estimation
310-
expect(mockCountTokens).toHaveBeenCalledWith({
311-
contents: request.contents,
312-
model: 'gpt-4',
313-
});
314-
}
303+
await expect(
304+
generator.generateContent(request, 'test-prompt-id'),
305+
).rejects.toThrow(/Request timeout after \d+s/);
315306
});
316307

317308
it('should fall back to character-based estimation if countTokens fails', async () => {

packages/core/src/core/geminiChat.ts

Lines changed: 22 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -158,14 +158,23 @@ export class GeminiChat {
158158
prompt_id: string,
159159
usageMetadata?: GenerateContentResponseUsageMetadata,
160160
responseText?: string,
161+
responseId?: string,
161162
): Promise<void> {
163+
const authType = this.config.getContentGeneratorConfig()?.authType;
164+
165+
// Don't log API responses for openaiContentGenerator
166+
if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
167+
return;
168+
}
169+
162170
logApiResponse(
163171
this.config,
164172
new ApiResponseEvent(
173+
responseId || `gemini-${Date.now()}`,
165174
this.config.getModel(),
166175
durationMs,
167176
prompt_id,
168-
this.config.getContentGeneratorConfig()?.authType,
177+
authType,
169178
usageMetadata,
170179
responseText,
171180
),
@@ -176,18 +185,27 @@ export class GeminiChat {
176185
durationMs: number,
177186
error: unknown,
178187
prompt_id: string,
188+
responseId?: string,
179189
): void {
180190
const errorMessage = error instanceof Error ? error.message : String(error);
181191
const errorType = error instanceof Error ? error.name : 'unknown';
182192

193+
const authType = this.config.getContentGeneratorConfig()?.authType;
194+
195+
// Don't log API errors for openaiContentGenerator
196+
if (authType === AuthType.QWEN_OAUTH || authType === AuthType.USE_OPENAI) {
197+
return;
198+
}
199+
183200
logApiError(
184201
this.config,
185202
new ApiErrorEvent(
203+
responseId,
186204
this.config.getModel(),
187205
errorMessage,
188206
durationMs,
189207
prompt_id,
190-
this.config.getContentGeneratorConfig()?.authType,
208+
authType,
191209
errorType,
192210
),
193211
);
@@ -320,6 +338,7 @@ export class GeminiChat {
320338
prompt_id,
321339
response.usageMetadata,
322340
JSON.stringify(response),
341+
response.responseId,
323342
);
324343

325344
this.sendPromise = (async () => {
@@ -563,6 +582,7 @@ export class GeminiChat {
563582
prompt_id,
564583
this.getFinalUsageMetadata(chunks),
565584
JSON.stringify(chunks),
585+
chunks[chunks.length - 1]?.responseId,
566586
);
567587
}
568588
this.recordHistory(inputContent, outputContent);

packages/core/src/core/openaiContentGenerator.test.ts

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -23,6 +23,7 @@ vi.mock('openai');
2323
// Mock logger modules
2424
vi.mock('../telemetry/loggers.js', () => ({
2525
logApiResponse: vi.fn(),
26+
logApiError: vi.fn(),
2627
}));
2728

2829
vi.mock('../utils/openaiLogger.js', () => ({

packages/core/src/core/openaiContentGenerator.ts

Lines changed: 34 additions & 90 deletions
Original file line number | Diff line number | Diff line change
@@ -22,8 +22,8 @@ import {
2222
} from '@google/genai';
2323
import { AuthType, ContentGenerator } from './contentGenerator.js';
2424
import OpenAI from 'openai';
25-
import { logApiResponse } from '../telemetry/loggers.js';
26-
import { ApiResponseEvent } from '../telemetry/types.js';
25+
import { logApiError, logApiResponse } from '../telemetry/loggers.js';
26+
import { ApiErrorEvent, ApiResponseEvent } from '../telemetry/types.js';
2727
import { Config } from '../config/config.js';
2828
import { openaiLogger } from '../utils/openaiLogger.js';
2929

@@ -263,6 +263,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
263263

264264
// Log API response event for UI telemetry
265265
const responseEvent = new ApiResponseEvent(
266+
response.responseId || 'unknown',
266267
this.model,
267268
durationMs,
268269
userPromptId,
@@ -291,41 +292,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
291292
? error.message
292293
: String(error);
293294

294-
// Estimate token usage even when there's an error
295-
// This helps track costs and usage even for failed requests
296-
let estimatedUsage;
297-
try {
298-
const tokenCountResult = await this.countTokens({
299-
contents: request.contents,
300-
model: this.model,
301-
});
302-
estimatedUsage = {
303-
promptTokenCount: tokenCountResult.totalTokens,
304-
candidatesTokenCount: 0, // No completion tokens since request failed
305-
totalTokenCount: tokenCountResult.totalTokens,
306-
};
307-
} catch {
308-
// If token counting also fails, provide a minimal estimate
309-
const contentStr = JSON.stringify(request.contents);
310-
const estimatedTokens = Math.ceil(contentStr.length / 4);
311-
estimatedUsage = {
312-
promptTokenCount: estimatedTokens,
313-
candidatesTokenCount: 0,
314-
totalTokenCount: estimatedTokens,
315-
};
316-
}
317-
318-
// Log API error event for UI telemetry with estimated usage
319-
const errorEvent = new ApiResponseEvent(
295+
// Log API error event for UI telemetry
296+
const errorEvent = new ApiErrorEvent(
297+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
298+
(error as any).requestID || 'unknown',
320299
this.model,
300+
errorMessage,
321301
durationMs,
322302
userPromptId,
323303
this.config.getContentGeneratorConfig()?.authType,
324-
estimatedUsage,
325-
undefined,
326-
errorMessage,
304+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
305+
(error as any).type,
306+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
307+
(error as any).code,
327308
);
328-
logApiResponse(this.config, errorEvent);
309+
logApiError(this.config, errorEvent);
329310

330311
// Log error interaction if enabled
331312
if (this.config.getContentGeneratorConfig()?.enableOpenAILogging) {
@@ -414,6 +395,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
414395

415396
// Log API response event for UI telemetry
416397
const responseEvent = new ApiResponseEvent(
398+
responses[responses.length - 1]?.responseId || 'unknown',
417399
this.model,
418400
durationMs,
419401
userPromptId,
@@ -445,40 +427,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
445427
? error.message
446428
: String(error);
447429

448-
// Estimate token usage even when there's an error in streaming
449-
let estimatedUsage;
450-
try {
451-
const tokenCountResult = await this.countTokens({
452-
contents: request.contents,
453-
model: this.model,
454-
});
455-
estimatedUsage = {
456-
promptTokenCount: tokenCountResult.totalTokens,
457-
candidatesTokenCount: 0, // No completion tokens since request failed
458-
totalTokenCount: tokenCountResult.totalTokens,
459-
};
460-
} catch {
461-
// If token counting also fails, provide a minimal estimate
462-
const contentStr = JSON.stringify(request.contents);
463-
const estimatedTokens = Math.ceil(contentStr.length / 4);
464-
estimatedUsage = {
465-
promptTokenCount: estimatedTokens,
466-
candidatesTokenCount: 0,
467-
totalTokenCount: estimatedTokens,
468-
};
469-
}
470-
471-
// Log API error event for UI telemetry with estimated usage
472-
const errorEvent = new ApiResponseEvent(
430+
// Log API error event for UI telemetry
431+
const errorEvent = new ApiErrorEvent(
432+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
433+
(error as any).requestID || 'unknown',
473434
this.model,
435+
errorMessage,
474436
durationMs,
475437
userPromptId,
476438
this.config.getContentGeneratorConfig()?.authType,
477-
estimatedUsage,
478-
undefined,
479-
errorMessage,
439+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
440+
(error as any).type,
441+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
442+
(error as any).code,
480443
);
481-
logApiResponse(this.config, errorEvent);
444+
logApiError(this.config, errorEvent);
482445

483446
// Log error interaction if enabled
484447
if (this.config.getContentGeneratorConfig()?.enableOpenAILogging) {
@@ -518,40 +481,21 @@ export class OpenAIContentGenerator implements ContentGenerator {
518481
? error.message
519482
: String(error);
520483

521-
// Estimate token usage even when there's an error in streaming setup
522-
let estimatedUsage;
523-
try {
524-
const tokenCountResult = await this.countTokens({
525-
contents: request.contents,
526-
model: this.model,
527-
});
528-
estimatedUsage = {
529-
promptTokenCount: tokenCountResult.totalTokens,
530-
candidatesTokenCount: 0, // No completion tokens since request failed
531-
totalTokenCount: tokenCountResult.totalTokens,
532-
};
533-
} catch {
534-
// If token counting also fails, provide a minimal estimate
535-
const contentStr = JSON.stringify(request.contents);
536-
const estimatedTokens = Math.ceil(contentStr.length / 4);
537-
estimatedUsage = {
538-
promptTokenCount: estimatedTokens,
539-
candidatesTokenCount: 0,
540-
totalTokenCount: estimatedTokens,
541-
};
542-
}
543-
544-
// Log API error event for UI telemetry with estimated usage
545-
const errorEvent = new ApiResponseEvent(
484+
// Log API error event for UI telemetry
485+
const errorEvent = new ApiErrorEvent(
486+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
487+
(error as any).requestID || 'unknown',
546488
this.model,
489+
errorMessage,
547490
durationMs,
548491
userPromptId,
549492
this.config.getContentGeneratorConfig()?.authType,
550-
estimatedUsage,
551-
undefined,
552-
errorMessage,
493+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
494+
(error as any).type,
495+
// eslint-disable-next-line @typescript-eslint/no-explicit-any
496+
(error as any).code,
553497
);
554-
logApiResponse(this.config, errorEvent);
498+
logApiError(this.config, errorEvent);
555499

556500
// Allow subclasses to suppress error logging for specific scenarios
557501
if (!this.shouldSuppressErrorLogging(error, request)) {

packages/core/src/services/loopDetectionService.test.ts

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -19,6 +19,8 @@ import { LoopDetectionService } from './loopDetectionService.js';
1919

2020
vi.mock('../telemetry/loggers.js', () => ({
2121
logLoopDetected: vi.fn(),
22+
logApiError: vi.fn(),
23+
logApiResponse: vi.fn(),
2224
}));
2325

2426
const TOOL_CALL_LOOP_THRESHOLD = 5;

packages/core/src/telemetry/clearcut-logger/event-metadata-key.ts

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -4,7 +4,7 @@
44
* SPDX-License-Identifier: Apache-2.0
55
*/
66

7-
// Defines valid event metadata keys for Clearcut logging.
7+
// Defines valid event metadata keys for Qwen logging.
88
export enum EventMetadataKey {
99
GEMINI_CLI_KEY_UNKNOWN = 0,
1010

packages/core/src/telemetry/integration.test.circular.ts

Lines changed: 11 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -9,11 +9,12 @@
99
*/
1010

1111
import { describe, it, expect } from 'vitest';
12-
import { ClearcutLogger } from './clearcut-logger/clearcut-logger.js';
12+
import { QwenLogger } from './qwen-logger/qwen-logger.js';
13+
import { RumEvent } from './qwen-logger/event-types.js';
1314
import { Config } from '../config/config.js';
1415

1516
describe('Circular Reference Integration Test', () => {
16-
it('should handle HttpsProxyAgent-like circular references in clearcut logging', () => {
17+
it('should handle HttpsProxyAgent-like circular references in qwen logging', () => {
1718
// Create a mock config with proxy
1819
const mockConfig = {
1920
getTelemetryEnabled: () => true,
@@ -44,16 +45,20 @@ describe('Circular Reference Integration Test', () => {
4445
proxyAgentLike.sockets['cloudcode-pa.googleapis.com:443'] = [socketLike];
4546

4647
// Create an event that would contain this circular structure
47-
const problematicEvent = {
48+
const problematicEvent: RumEvent = {
49+
timestamp: Date.now(),
50+
event_type: 'exception',
51+
type: 'error',
52+
name: 'api_error',
4853
error: new Error('Network error'),
4954
function_args: {
5055
filePath: '/test/file.txt',
5156
httpAgent: proxyAgentLike, // This would cause the circular reference
5257
},
53-
};
58+
} as RumEvent;
5459

55-
// Test that ClearcutLogger can handle this
56-
const logger = ClearcutLogger.getInstance(mockConfig);
60+
// Test that QwenLogger can handle this
61+
const logger = QwenLogger.getInstance(mockConfig);
5762

5863
expect(() => {
5964
logger?.enqueueLogEvent(problematicEvent);

0 commit comments

Comments (0)