diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3504667d..4c541dc3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -278,9 +278,6 @@ jobs: echo "$env:TEMP\wix" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append # Set MSI-compatible version - echo "=== VERSION ===" - echo "${{ steps.get_version.outputs.VERSION }}" - # $originalVersion = "${{ steps.get_version.outputs.VERSION }}" $configPath = "dui\src-tauri\tauri.conf.json" $config = Get-Content $configPath | ConvertFrom-Json $originalVersion = $config.version diff --git a/CHANGELOG.md b/CHANGELOG.md index 14d9fb14..5955251d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,35 +17,185 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 -## [0.9.0] - 2025-08-09 +## [0.9.10] - 2025-10-04 -### Notes +### Added -#### Major Upgrade +- support resource autocomplete suggestions for all datasource types +- option to exclude datasource from autocomplete suggestions +- UI controls to filter list of message entries +- new tool for download_resource +- support for remote MCP servers +- support for MCP oAuth +- support for MCP sampling +- support for 'best model' selection for sampling requests +- foundation for session management - multi-user in API, with auth from bui/cli -Beyond Better now supports both read and write editing for external data sources. Notion and Google Docs -are currently supported with more providers coming soon. See the "Data Sources" help page to learn how to -add data sources and configure authentication. +### Changed -- https://https://beyondbetter.app/docs/datasources +- updated default model to Claude Sonnet 4.5 +- fetch timeout for bbLLM +- updated deps for MCP SDK +- UI for MCP server configs + +### Fixed + +- reading PDF documents with Claude +- project/request context in McpManager +- reconnection for MCP sessions + + +## [0.9.9] - 2025-09-05 ### Added +- [saas] Google File Picker for restricted access to drive contents +- Conversation export and copy to clipboard ### Changed -- releases moved to BB storage bucket +- filtered instructions for datasources +- additional datasource instructions for utility tools (rename, remove, etc) + +### Fixed + +- [saas] file rename for google datasource + + +## [0.9.8] - 2025-08-31 + +### Added + +- [saas] google auth flow in app (dui) + +### Changed + + +### Fixed + +- searching collaborations +## [0.9.7] - 2025-08-30 + +### Added + +- [saas] support for Google Sheets editing + +### Changed + +- [saas] refactored google datasource for better extensibility + +### Fixed + + +## [0.9.6] - 2025-08-17 + +### Added + +- deep research (using new web_search tool) + +### Changed + +- [saas] google datasource supports resource rename and delete +- [saas] google datasource supports loading non-doc resources (eg image files) +- [saas] google and notion datasources support more robust resource searching + ### Fixed +- save message history after fatal conversation errors +- caching for feature access checks + -## [0.8.16] - 2025-08-08 +## [0.9.5] - 2025-08-14 ### Added -- read/write support for editing Notion documents (saas only) -- read/write support for editing GoogleDocs documents (saas only) +- support for pricing tiers for models with large context windows +- UI feedback for tiers in progress bar + +### Changed + +- Claude Sonnet uses 1 million token context window + +### Fixed + +- updated details for capabilities & pricing for Anthropic, Google and OpenAI models + + +## [0.9.4] - 
2025-08-12
+
+### Added
+
+- support for Claude Opus 4.1
+- support for ChatGPT 5 (& mini & nano)
+
+### Changed
+
+
+### Fixed
+
+
+## [0.9.3] - 2025-08-12
+
+### Added
+
+- datasource instructions for writeResource content formats
+
+### Changed
+
+- less verbose system prompts (rely on datasource instructions instead)
+
+### Fixed
+
+- [saas] google datasource supports structured content for writeResource tool
+- ensure terminal detachment during app self-update
+- [saas] invalidate stale auth config after updating google auth in BUI
+
+
+## [0.9.2] - 2025-08-11
+
+### Added
+
+
+### Changed
+
+
+### Fixed
+
+- [saas] OAuth flow targets and config
+
+
+## [0.9.1] - 2025-08-10
+
+### Added
+
+
+### Changed
+
+- additional editing instructions for different datasources
+
+### Fixed
+
+- [saas] apply range edits in descending order to avoid index shifting with multiple operations
+
+
+## [0.9.0] - 2025-08-09
+
+### Notes
+
+#### Major Upgrade
+
+Beyond Better now supports both read and write editing for external data sources. Notion and Google Docs
+are currently supported with more providers coming soon [saas]. See the "Data Sources" help page to learn how to
+add data sources and configure authentication.
+
+- https://beyondbetter.app/docs/datasources
+
+### Added
+
+- [saas] read/write support for editing Notion documents
+- [saas] read/write support for editing Google documents
 - dynamic loading for third-party datasources
 - integration tests for datasources
@@ -54,6 +204,7 @@ add data sources and configure authentication.
 - remaining tools updated to use datasource accessors
 - deprecated searchAndReplace and rewriteResource tools in favour of editResource and writeResource
 - support for contentFormat and editTypes to all resource tools
+- releases moved to BB storage bucket
 
 ### Fixed
 
@@ -754,7 +905,7 @@ location of project config; changes format of storage and configurations; and mo
 
 ### Added
 
-- http/websocket proxy in BUI to work around mixed content warnings in webview
+- http/websocket proxy in DUI to work around mixed content warnings in webview
 - persisting windows size/position
 - dark mode support
 
diff --git a/TODO.md b/TODO.md
index 61c1ba19..d4dc3392 100644
--- a/TODO.md
+++ b/TODO.md
@@ -272,3 +272,26 @@ X Change free plan to BYO api keys - limited data sources and tools
 √ add token-usage log to llm-proxy
 √ save partial prompt when navigating away, not just on page reload (eg, user switches to different conversation or to project settings)
+√ Fix postgres linter errors: https://supabase.com/dashboard/project/asyagnmzoxgyhqprdaky/advisors/security
+√ Change tools to use accessors: removeResources, renameResources, rewriteResources
+√ Star conversations in BUI
+√ Support coupon codes for subscriptions
+√ Ensure OAuth details get reloaded into projectConfig after doing auth flow
+√ Add detailed info in ~~system prompt~~ datasource instructions for how to structure URIs
+√ Progress marker for pricing tier change in chat input
+√ Add "Settings" to docs - with focus on model selection
+√ Hardcoded tick in progress bar
+√ Token pill is not resetting with new (or changed) conversation
+√ Add feature restrictions for read/write datasources
+√ Test whether feature checks for datasources are working
+√ Pricing fallback error - admin notification should include details of token type, model, etc.
+√ No pricing found for token type anthropic_cache_read in model gemini-2.5-pro-preview-06-05
+√ Create token_usage types to use tiers and cache types from provider_model_pricing
+√ Check
token_usage records for Gemini (done with new tiered pricing) +√ After auth for googledocs datasource; ensure saving to project config also updates `auth` in instantiated datasource +√ Re-enable model features check in llm-proxy +√ Re-enable caching in features utils files +√ Include symlinks in filebrowser for datasources - symlinks should get resolved before saving root directory +X Change searchAndReplace tool to use structured `data` for `bbResponse` - Using editResource tool instead +√ change conversation title in BUI +√ Ensure message history is saved, even if there is tool error (should be ok) or LLM response error diff --git a/api/deno.jsonc b/api/deno.jsonc index f315a3df..430190e3 100644 --- a/api/deno.jsonc +++ b/api/deno.jsonc @@ -1,6 +1,6 @@ { "name": "bb-api", - "version": "0.9.0-oss", + "version": "0.9.10-oss", "license": "AGPL-3.0-or-later", "copyright": "2025 - Beyond Better ", "exports": "./src/main.ts", @@ -15,8 +15,8 @@ "start": "DENO_TLS_CA_STORE=system deno run --allow-read --allow-write --allow-run --allow-net --allow-env src/main.ts", "dev": "DENO_TLS_CA_STORE=system deno run --watch --allow-read --allow-write --allow-run --allow-net --allow-env src/main.ts", "debug": "DENO_TLS_CA_STORE=system LOG_LEVEL=debug deno run --allow-read --allow-write --allow-run --allow-net --allow-env src/main.ts", - "test": "DENO_TLS_CA_STORE=system deno test --allow-read --allow-write --allow-run --allow-net --allow-env --allow-import tests/ tests/t/llms/tools/index.ts", - "test-single": "DENO_TLS_CA_STORE=system deno test --allow-read --allow-write --allow-run --allow-net --allow-env --allow-import ", + "test": "DENO_TLS_CA_STORE=system deno test --allow-read --allow-write --allow-run --allow-net --allow-env --allow-import --unstable-kv tests/ tests/t/llms/tools/index.ts", + "test-single": "DENO_TLS_CA_STORE=system deno test --allow-read --allow-write --allow-run --allow-net --allow-env --allow-import --unstable-kv ", "generate-tools-manifest": "deno run --allow-read --allow-write scripts/generate_core_tools_manifest.ts && deno fmt ./src/llms/tools_manifest.ts", "update-model-capabilities": "deno run --allow-net --allow-read --allow-write --allow-env scripts/update_model_capabilities.ts", "sync-models-to-llm_proxy": "deno run --allow-net --allow-read --allow-write --allow-env scripts/sync_models_to_llm_proxy.ts", diff --git a/api/scripts/debug-googledocs.ts b/api/scripts/debug-googledocs.ts deleted file mode 100644 index 21fd5829..00000000 --- a/api/scripts/debug-googledocs.ts +++ /dev/null @@ -1,196 +0,0 @@ -/** - * Debug script for Google Docs API issues - * This script helps diagnose common problems with Google Docs API access - */ - -import { logger } from 'shared/logger.ts'; - -// Configuration from environment variables -const GOOGLE_ACCESS_TOKEN = Deno.env.get('GOOGLE_ACCESS_TOKEN') || 'your-google-access-token'; -const TEST_DOCUMENT_ID = Deno.env.get('TEST_DOCUMENT_ID') || 'your-document-id'; - -async function debugGoogleDocsAccess() { - console.log('Google Docs API Debug Script'); - console.log('============================\n'); - - if (GOOGLE_ACCESS_TOKEN === 'your-google-access-token') { - console.error('ERROR: Please set the GOOGLE_ACCESS_TOKEN environment variable'); - Deno.exit(1); - } - - try { - // Step 1: Verify token info - console.log('Step 1: Verifying token info...'); - const tokenInfoResponse = await fetch( - 'https://www.googleapis.com/oauth2/v1/tokeninfo', - { - headers: { - 'Authorization': `Bearer ${GOOGLE_ACCESS_TOKEN}`, - }, - } - ); - - if 
(tokenInfoResponse.ok) { - const tokenInfo = await tokenInfoResponse.json(); - console.log('✅ Token is valid'); - console.log(' Scopes:', tokenInfo.scope); - console.log(' Expires in:', tokenInfo.expires_in, 'seconds'); - console.log(' Client ID:', tokenInfo.issued_to); - } else { - console.error('❌ Token validation failed:', await tokenInfoResponse.text()); - return; - } - - // Step 2: Test Drive API access (to verify basic API connectivity) - console.log('\nStep 2: Testing Drive API access...'); - const driveResponse = await fetch( - 'https://www.googleapis.com/drive/v3/about?fields=user', - { - headers: { - 'Authorization': `Bearer ${GOOGLE_ACCESS_TOKEN}`, - 'Accept': 'application/json', - }, - } - ); - - if (driveResponse.ok) { - const driveInfo = await driveResponse.json(); - console.log('✅ Drive API access working'); - console.log(' User email:', driveInfo.user?.emailAddress); - console.log(' User name:', driveInfo.user?.displayName); - } else { - console.error('❌ Drive API access failed:', await driveResponse.text()); - } - - // Step 3: List available documents - console.log('\nStep 3: Listing your accessible Google Docs...'); - const listResponse = await fetch( - "https://www.googleapis.com/drive/v3/files?q=mimeType='application/vnd.google-apps.document' and trashed=false&pageSize=10&fields=files(id,name,webViewLink,owners,shared,permissions)", - { - headers: { - 'Authorization': `Bearer ${GOOGLE_ACCESS_TOKEN}`, - 'Accept': 'application/json', - }, - } - ); - - if (listResponse.ok) { - const listData = await listResponse.json(); - console.log(`✅ Found ${listData.files?.length || 0} accessible documents:`); - - listData.files?.forEach((file: any, index: number) => { - console.log(` ${index + 1}. ${file.name}`); - console.log(` ID: ${file.id}`); - console.log(` URL: ${file.webViewLink}`); - console.log(` Shared: ${file.shared ? 'Yes' : 'No'}`); - console.log(` Owner: ${file.owners?.[0]?.displayName || 'Unknown'}`); - console.log(''); - }); - - // Check if our test document is in the list - if (TEST_DOCUMENT_ID !== 'your-document-id') { - const foundDoc = listData.files?.find((file: any) => file.id === TEST_DOCUMENT_ID); - if (foundDoc) { - console.log(`✅ Test document found in accessible documents: ${foundDoc.name}`); - } else { - console.log(`⚠️ Test document ID ${TEST_DOCUMENT_ID} not found in accessible documents`); - console.log(' This might be why you\'re getting a 404 error.'); - } - } - } else { - console.error('❌ Failed to list documents:', await listResponse.text()); - } - - // Step 4: Test document access (if TEST_DOCUMENT_ID is provided) - if (TEST_DOCUMENT_ID !== 'your-document-id') { - console.log(`\nStep 4: Testing access to specific document ${TEST_DOCUMENT_ID}...`); - - // First, try to get file metadata from Drive API - const metadataResponse = await fetch( - `https://www.googleapis.com/drive/v3/files/${TEST_DOCUMENT_ID}?fields=id,name,mimeType,owners,shared,permissions`, - { - headers: { - 'Authorization': `Bearer ${GOOGLE_ACCESS_TOKEN}`, - 'Accept': 'application/json', - }, - } - ); - - if (metadataResponse.ok) { - const metadata = await metadataResponse.json(); - console.log('✅ Document metadata accessible via Drive API:'); - console.log(' Name:', metadata.name); - console.log(' MIME type:', metadata.mimeType); - console.log(' Shared:', metadata.shared ? 
'Yes' : 'No'); - console.log(' Owner:', metadata.owners?.[0]?.displayName || 'Unknown'); - - // Now try the Docs API - console.log('\nStep 4b: Testing Docs API access...'); - // Test the correct Docs API endpoint - console.log(' Testing correct Docs API endpoint...'); - const docsResponse = await fetch( - `https://docs.googleapis.com/v1/documents/${TEST_DOCUMENT_ID}`, - { - headers: { - 'Authorization': `Bearer ${GOOGLE_ACCESS_TOKEN}`, - 'Accept': 'application/json', - }, - } - ); - - // Check docs.googleapis.com result (correct endpoint) - if (docsResponse.ok) { - const docData = await docsResponse.json(); - console.log('✅ Document accessible via Docs API (correct endpoint)'); - console.log(' Title:', docData.title); - console.log(' Document ID:', docData.documentId); - console.log(' Revision ID:', docData.revisionId); - } else { - const errorText = await docsResponse.text(); - console.error('❌ Docs API access failed:', docsResponse.status, errorText); - - if (docsResponse.status === 404) { - console.log('\n🔍 Diagnosis: 404 Error Analysis'); - console.log(' The document exists (Drive API can see it) but Docs API cannot access it.'); - console.log(' Possible causes:'); - console.log(' 1. Document sharing permissions don\'t include your OAuth app'); - console.log(' 2. Document is in a restricted folder'); - console.log(' 3. Document owner needs to grant additional permissions'); - console.log(' 4. The document might be corrupted or in an unsupported state'); - } - } - } else { - const errorText = await metadataResponse.text(); - console.error('❌ Document metadata not accessible:', metadataResponse.status, errorText); - - if (metadataResponse.status === 404) { - console.log('\n🔍 Diagnosis: Document not found'); - console.log(' The document ID might be incorrect or you don\'t have access to it.'); - console.log(' Double-check the document ID from the URL.'); - } - } - } - - // Step 5: Recommendations - console.log('\nStep 5: Recommendations'); - console.log('======================='); - - if (TEST_DOCUMENT_ID === 'your-document-id') { - console.log('1. Set TEST_DOCUMENT_ID environment variable with a real document ID'); - console.log('2. Make sure you own the document or it\'s shared with you'); - } else { - console.log('1. Try using one of the document IDs listed above that you have access to'); - console.log('2. Make sure the document is shared with the email associated with your OAuth token'); - console.log('3. If you own the document, try sharing it with "Anyone with the link can view"'); - console.log('4. 
Check that the Google Docs API is enabled in your Google Cloud Console'); - } - - } catch (error) { - console.error('Error during debug:', error); - } -} - -// Run the debug script -if (import.meta.main) { - debugGoogleDocsAccess(); -} \ No newline at end of file diff --git a/api/scripts/sync_models_to_llm_proxy.ts b/api/scripts/sync_models_to_llm_proxy.ts index 31c36421..dc8992a5 100755 --- a/api/scripts/sync_models_to_llm_proxy.ts +++ b/api/scripts/sync_models_to_llm_proxy.ts @@ -13,7 +13,7 @@ * Options: * --input=PATH Input capabilities file (default: api/src/data/modelCapabilities.json) * --environment=ENV Environment: staging|production (default: staging) - * --supabase-url=URL Override Supabase config URL + * --supabase-config-url=URL Override Supabase config URL * --auth-token=TOKEN Authentication token (can also use env var) * --dry-run Show what would be synced without making changes * --force Update all models regardless of changes @@ -29,6 +29,12 @@ import type { PartialTokenPricing, //TokenTypeEnum } from 'shared/types/models.ts'; +import { + type CacheTypeConfig, + type ContentTypeConfig, + generateTokenType, + type TieredPricingConfig, +} from 'shared/tieredPricing.ts'; import { createClient } from '@supabase/supabase-js'; import { fetchSupabaseConfig } from 'api/auth/config.ts'; @@ -36,7 +42,7 @@ import { fetchSupabaseConfig } from 'api/auth/config.ts'; * LLM-Proxy model interface (matches new database schema) */ interface LLMProxyModel { - model_id: string; + //model_id: string; provider_name: string; model_name: string; model_type: string; @@ -56,7 +62,6 @@ interface LLMProxyModel { * Model update payload for the edge function (new dynamic structure) */ interface ModelUpdatePayload { - model_id: string; provider_name: string; model_name: string; model_type: string; @@ -71,7 +76,7 @@ interface ModelUpdatePayload { interface SyncConfig { inputPath: string; environment: 'staging' | 'production'; - supabaseUrl?: string; + supabaseConfigUrl?: string; authToken?: string; dryRun: boolean; force: boolean; @@ -81,7 +86,7 @@ interface SyncConfig { * Sync result for a single model */ interface SyncResult { - modelId: string; + modelName: string; provider: string; action: 'created' | 'updated' | 'skipped' | 'failed'; error?: string; @@ -91,6 +96,7 @@ interface SyncResult { interface UpdatedModelCapabilities { displayName: string; contextWindow: number; + featureKey: string; maxOutputTokens: number; token_pricing?: PartialTokenPricing; // New dynamic pricing structure pricing_metadata?: { @@ -109,6 +115,18 @@ interface UpdatedModelCapabilities { currency: string; effectiveDate: string; }; + // NEW: Tiered pricing configuration fields + inputTokensTieredConfig?: TieredPricingConfig; + inputTokensCacheTypes?: Record; + inputTokensContentTypes?: Record; + outputTokensTieredConfig?: TieredPricingConfig; + outputTokensCacheTypes?: Record; + outputTokensContentTypes?: Record; + thoughtTokensConfig?: { + basePrice: number; + tieredPricing?: TieredPricingConfig; + description?: string; + }; supportedFeatures: { functionCalling: boolean; json: boolean; @@ -170,6 +188,11 @@ class ModelSyncer { // Sync models await this.syncModels(modelsToSync); + // Post-sync validation + if (!this.config.dryRun) { + await this.validatePricingRecords(); + } + // Report results this.reportResults(); @@ -208,12 +231,12 @@ class ModelSyncer { */ private async fetchCurrentLLMProxyModels(): Promise { try { - console.log('🔍 Fetching current models from llm-proxy...'); - const supabaseConfig = await 
fetchSupabaseConfig({ - supabaseConfigUrl: this.config.supabaseUrl, + supabaseConfigUrl: this.config.supabaseConfigUrl, }); + console.log(`🔍 Fetching current models from llm-proxy at ${supabaseConfig.url} ...`); + const supabaseClient = createClient(supabaseConfig.url, supabaseConfig.anonKey); const { data, error } = await supabaseClient.functions.invoke('provider-models', { @@ -258,22 +281,22 @@ class ModelSyncer { continue; } - for (const [modelId, capabilities] of Object.entries(models)) { + for (const [modelName, capabilities] of Object.entries(models)) { // Skip hidden models if (capabilities.hidden) { - console.log(`⏭️ Skipping hidden model: ${provider}/${modelId}`); + console.log(`⏭️ Skipping hidden model: ${provider}/${modelName}`); continue; } // Convert to LLM-Proxy format - const payload = this.convertToLLMProxyFormat(provider, modelId, capabilities); + const payload = this.convertToLLMProxyFormat(provider, modelName, capabilities); // Check if model needs updating if (this.config.force || this.shouldUpdateModel(payload)) { modelsToSync.push(payload); } else { this.syncResults.push({ - modelId, + modelName: modelName, provider, action: 'skipped', }); @@ -290,12 +313,12 @@ class ModelSyncer { */ private convertToLLMProxyFormat( provider: string, - modelId: string, + modelName: string, capabilities: UpdatedModelCapabilities, ): ModelUpdatePayload { // Convert from new token_pricing structure or fallback to legacy pricing structure let token_pricing: PartialTokenPricing = {}; - + if (capabilities.token_pricing) { // New format: use token_pricing directly token_pricing = { ...capabilities.token_pricing }; @@ -303,12 +326,13 @@ class ModelSyncer { // Legacy format: convert from old pricing structure token_pricing.input = capabilities.pricing.inputTokens.basePriceCentsUsd; token_pricing.output = capabilities.pricing.outputTokens.basePriceCentsUsd; - + if (capabilities.pricing.inputTokens.cachedPriceCentsUsd !== undefined) { if (provider === 'anthropic') { token_pricing.anthropic_cache_read = capabilities.pricing.inputTokens.cachedPriceCentsUsd; // Estimate cache write cost as 1.25x base cost - token_pricing.anthropic_cache_write_5min = capabilities.pricing.inputTokens.basePriceCentsUsd * 1.25; + token_pricing.anthropic_cache_write_5min = capabilities.pricing.inputTokens.basePriceCentsUsd * + 1.25; } else { token_pricing.cache_read = capabilities.pricing.inputTokens.cachedPriceCentsUsd; } @@ -316,17 +340,18 @@ class ModelSyncer { } return { - model_id: modelId, provider_name: provider, - model_name: modelId, // Using modelId as model_name + model_name: modelName, model_type: 'text', // Default to text, could be enhanced based on capabilities token_pricing: token_pricing, // Dynamic pricing structure is_available: true, settings: { + // Existing settings displayName: capabilities.displayName, contextWindow: capabilities.contextWindow, maxOutputTokens: capabilities.maxOutputTokens, supportedFeatures: capabilities.supportedFeatures, + featureKey: capabilities.featureKey, responseSpeed: capabilities.responseSpeed, cost: capabilities.cost, intelligence: capabilities.intelligence, @@ -336,6 +361,33 @@ class ModelSyncer { ...(capabilities.trainingCutoff && { trainingCutoff: capabilities.trainingCutoff }), ...(capabilities.releaseDate && { releaseDate: capabilities.releaseDate }), ...(capabilities.pricing_metadata && { pricing_metadata: capabilities.pricing_metadata }), + + // NEW: Complete tiered pricing configuration stored directly in settings + ...(capabilities.inputTokensTieredConfig && { 
+ inputTokensTieredConfig: capabilities.inputTokensTieredConfig, + }), + ...(capabilities.inputTokensCacheTypes && { + inputTokensCacheTypes: capabilities.inputTokensCacheTypes, + }), + ...(capabilities.inputTokensContentTypes && { + inputTokensContentTypes: capabilities.inputTokensContentTypes, + }), + ...(capabilities.outputTokensTieredConfig && { + outputTokensTieredConfig: capabilities.outputTokensTieredConfig, + }), + ...(capabilities.outputTokensCacheTypes && { + outputTokensCacheTypes: capabilities.outputTokensCacheTypes, + }), + ...(capabilities.outputTokensContentTypes && { + outputTokensContentTypes: capabilities.outputTokensContentTypes, + }), + ...(capabilities.thoughtTokensConfig && { + thoughtTokensConfig: capabilities.thoughtTokensConfig, + }), + + // Convenience flag for quick tier support detection + supportsTieredPricing: + !!(capabilities.inputTokensTieredConfig?.tiers || capabilities.outputTokensTieredConfig?.tiers), }, }; } @@ -345,7 +397,7 @@ class ModelSyncer { */ private shouldUpdateModel(payload: ModelUpdatePayload): boolean { const existing = this.currentLLMProxyModels.find( - (m) => m.model_id === payload.model_id && m.provider_name === payload.provider_name, + (m) => m.model_name === payload.model_name && m.provider_name === payload.provider_name, ); if (!existing) { @@ -373,11 +425,13 @@ class ModelSyncer { console.log('🧪 DRY RUN - Would sync the following models:'); for (const model of modelsToSync) { const existing = this.currentLLMProxyModels.find( - (m) => m.model_id === model.model_id && m.provider_name === model.provider_name, + (m) => m.model_name === model.model_name && m.provider_name === model.provider_name, ); const action = existing ? 'UPDATE' : 'CREATE'; const tokenTypes = Object.keys(model.token_pricing).join(', '); - console.log(` ${action}: ${model.provider_name}/${model.model_id} - ${model.settings.displayName} (${tokenTypes})`); + console.log( + ` ${action}: ${model.provider_name}/${model.model_name} - ${model.settings.displayName} (${tokenTypes})`, + ); } return; } @@ -385,7 +439,7 @@ class ModelSyncer { console.log(`🔄 Syncing ${modelsToSync.length} models...`); const supabaseConfig = await fetchSupabaseConfig({ - supabaseConfigUrl: this.config.supabaseUrl, + supabaseConfigUrl: this.config.supabaseConfigUrl, }); const supabaseClient = createClient(supabaseConfig.url, supabaseConfig.anonKey); @@ -395,7 +449,7 @@ class ModelSyncer { const progress = `[${i + 1}/${modelsToSync.length}]`; try { - console.log(`${progress} Syncing ${model.provider_name}/${model.model_id}...`); + console.log(`${progress} Syncing ${model.provider_name}/${model.model_name}...`); const { data: _data, error } = await supabaseClient.functions.invoke('sync-model', { method: 'POST', @@ -412,29 +466,31 @@ class ModelSyncer { } const existing = this.currentLLMProxyModels.find( - (m) => m.model_id === model.model_id && m.provider_name === model.provider_name, + (m) => m.model_name === model.model_name && m.provider_name === model.provider_name, ); this.syncResults.push({ - modelId: model.model_id, + modelName: model.model_name, provider: model.provider_name, action: existing ? 'updated' : 'created', }); const tokenCount = Object.keys(model.token_pricing).length; - console.log(` ✅ ${progress} Successfully synced ${model.provider_name}/${model.model_id} (${tokenCount} token types)`); + console.log( + ` ✅ ${progress} Successfully synced ${model.provider_name}/${model.model_name} (${tokenCount} token types)`, + ); } catch (error) { const errorMessage = isError(error) ? 
error.message : String(error); this.syncResults.push({ - modelId: model.model_id, + modelName: model.model_name, provider: model.provider_name, action: 'failed', error: errorMessage, }); console.log( - ` ❌ ${progress} Failed to sync ${model.provider_name}/${model.model_id}: ${errorMessage}`, + ` ❌ ${progress} Failed to sync ${model.provider_name}/${model.model_name}: ${errorMessage}`, ); } @@ -445,6 +501,87 @@ class ModelSyncer { } } + /** + * Validate that all synced models have essential pricing records + */ + private async validatePricingRecords(): Promise { + console.log('🔍 Validating pricing records for synced models...'); + + try { + const supabaseConfig = await fetchSupabaseConfig({ + supabaseConfigUrl: this.config.supabaseConfigUrl, + }); + + const supabaseClient = createClient(supabaseConfig.url, supabaseConfig.anonKey, { + db: { schema: 'abi_llm' }, + }); + + // Check for models missing essential pricing + const { data: modelsWithoutPricing, error } = await supabaseClient.rpc('validate_model_pricing'); + + if (error) { + console.error('❌ Error validating pricing records:', error.message); + return; + } + + if (modelsWithoutPricing && modelsWithoutPricing.length > 0) { + console.warn(`⚠️ Found ${modelsWithoutPricing.length} models with missing essential pricing:`); + for (const model of modelsWithoutPricing) { + console.warn( + ` - ${model.provider_name}/${model.model_name}: missing ${model.missing_types.join(', ')}`, + ); + } + + // These models should have been marked as unavailable by sync-model function + console.log('💡 Models with missing pricing have been marked as unavailable'); + } else { + console.log('✅ All synced models have essential pricing records'); + } + + // Check for fallback usage patterns (if models have tiered config but using base pricing) + const { data: tieredModelsWithoutRecords, error: tieredError } = await supabaseClient + .from('provider_models') + .select(` + model_id, + model_name, + provider_name, + settings + `) + .eq('is_available', true) + .not('settings->>supportsTieredPricing', 'is', null); + + if (!tieredError && tieredModelsWithoutRecords) { + const problematicModels = []; + + for (const model of tieredModelsWithoutRecords) { + if (model.settings?.supportsTieredPricing) { + // Check if model has any tiered pricing records + const { data: tieredRecords } = await supabaseClient + .from('provider_model_pricing') + .select('token_type') + .eq('model_id', model.model_id) + .like('token_type', '%_tier%') + .is('effective_until', null); + + if (!tieredRecords || tieredRecords.length === 0) { + problematicModels.push(`${model.provider_name}/${model.model_name}`); + } + } + } + + if (problematicModels.length > 0) { + console.warn(`⚠️ Models claiming tiered pricing but missing tiered records:`); + for (const model of problematicModels) { + console.warn(` - ${model}`); + } + console.log('💡 These models will fallback to base pricing during usage'); + } + } + } catch (error) { + console.error('❌ Error during pricing validation:', isError(error) ? 
error.message : error); + } + } + /** * Report synchronization results */ @@ -464,21 +601,21 @@ class ModelSyncer { if (failed.length > 0) { console.log('\n❌ Failed Models:'); for (const result of failed) { - console.log(` - ${result.provider}/${result.modelId}: ${result.error}`); + console.log(` - ${result.provider}/${result.modelName}: ${result.error}`); } } if (created.length > 0) { console.log('\n✅ Created Models:'); for (const result of created) { - console.log(` + ${result.provider}/${result.modelId}`); + console.log(` + ${result.provider}/${result.modelName}`); } } if (updated.length > 0) { console.log('\n🔄 Updated Models:'); for (const result of updated) { - console.log(` ~ ${result.provider}/${result.modelId}`); + console.log(` ~ ${result.provider}/${result.modelName}`); } } } @@ -487,7 +624,7 @@ class ModelSyncer { /** * Load environment-specific configuration */ -async function loadEnvironmentConfig(environment: string): Promise<{ authToken?: string; supabaseUrl?: string }> { +async function loadEnvironmentConfig(environment: string): Promise<{ authToken?: string; supabaseConfigUrl?: string }> { const scriptDir = dirname(fromFileUrl(import.meta.url)); const envFile = join(scriptDir, `.env.${environment}`); @@ -509,7 +646,7 @@ async function loadEnvironmentConfig(environment: string): Promise<{ authToken?: console.log(`📁 Loaded environment config from ${envFile}`); return { authToken: config.LLM_PROXY_AUTH_TOKEN, - supabaseUrl: config.SUPABASE_CONFIG_URL, + supabaseConfigUrl: config.SUPABASE_CONFIG_URL, }; } } catch (error) { @@ -529,7 +666,7 @@ async function main() { // Parse command line arguments const args = parseArgs(Deno.args, { - string: ['input', 'environment', 'supabase-url', 'auth-token'], + string: ['input', 'environment', 'supabase-config-url', 'auth-token'], boolean: ['dry-run', 'force'], default: { input: defaultInputPath, @@ -552,7 +689,7 @@ async function main() { const config: SyncConfig = { inputPath: args.input, environment: args.environment as 'staging' | 'production', - supabaseUrl: args['supabase-url'] || envConfig.supabaseUrl, + supabaseConfigUrl: args['supabase-config-url'] || envConfig.supabaseConfigUrl, authToken: args['auth-token'] || envConfig.authToken || Deno.env.get('LLM_PROXY_AUTH_TOKEN'), dryRun: args['dry-run'], force: args.force, diff --git a/api/scripts/update_model_capabilities.ts b/api/scripts/update_model_capabilities.ts index 1462f81d..469d128a 100755 --- a/api/scripts/update_model_capabilities.ts +++ b/api/scripts/update_model_capabilities.ts @@ -28,6 +28,7 @@ import type { PartialTokenPricing, //TokenTypeEnum } from 'shared/types/models.ts'; +import type { TieredPricingConfig, CacheTypeConfig, ContentTypeConfig } from 'shared/utils/tieredPricing.ts'; import type { ModelCapabilities } from 'api/types/modelCapabilities.types.ts'; import { createClient } from '@supabase/supabase-js'; import { fetchSupabaseConfig } from 'api/auth/config.ts'; @@ -57,12 +58,21 @@ interface SourceModel { inputTokens: { basePrice: number; cachedPrice?: number; - tieredPricing?: Record; + tieredPricing?: TieredPricingConfig; + cacheTypes?: Record; + contentTypes?: Record; multimodal?: Record; }; outputTokens: { basePrice: number; - tieredPricing?: Record; + tieredPricing?: TieredPricingConfig; + cacheTypes?: Record; + contentTypes?: Record; + }; + thoughtTokens?: { + basePrice: number; + tieredPricing?: TieredPricingConfig; + description?: string; }; currency: string; effectiveDate: string; @@ -131,7 +141,7 @@ interface LLMProxyModel { */ interface FetcherConfig 
{ environment: 'staging' | 'production'; - supabaseUrl?: string; + supabaseConfigUrl?: string; outputPath: string; sourceDir: string; providersToProcess: string[]; @@ -167,6 +177,8 @@ function generateFeatureKey(provider: string, modelId: string): string { return 'models.openai.o4'; } else if (modelId.startsWith('o1')) { return 'models.openai.o1'; + } else if (modelId.includes('gpt-5') || modelId.includes('gpt5')) { + return 'models.openai.gpt5'; } else if (modelId.includes('gpt-4') || modelId.includes('gpt4')) { return 'models.openai.gpt4'; } else if (modelId.includes('gpt-3') || modelId.includes('gpt3')) { @@ -371,22 +383,29 @@ class ModelCapabilitiesFetcher { sourceData: SourceModelData, ): ModelCapabilities & { hidden?: boolean } { // Handle pricing - Ollama models are local/free - const token_pricing: Record = { input: 0, output: 0 }; + let token_pricing: Record = { input: 0, output: 0 }; let currency = 'USD'; let effectiveDate = new Date().toISOString().split('T')[0]; + + // Tiered pricing configuration (extracted from canonical conversion) + let canonicalPricing: any = {}; if (sourceModel.pricing && sourceModel.pricing.inputTokens && sourceModel.pricing.outputTokens) { - // Convert pricing to dynamic token_pricing structure - token_pricing.input = this.convertPricingToCentsPerMillionTokens( - sourceModel.pricing.inputTokens.basePrice, - sourceData.pricingUnit, - ); - token_pricing.output = this.convertPricingToCentsPerMillionTokens( - sourceModel.pricing.outputTokens.basePrice, - sourceData.pricingUnit, - ); + // Use new canonical conversion function + canonicalPricing = this.convertPricingToCanonical(sourceModel.pricing); + + // Set basic token pricing from canonical (already in cents) + token_pricing = { + input: canonicalPricing.input || 0, + output: canonicalPricing.output || 0, + }; + + // Add thought tokens if present + if (canonicalPricing.thought) { + token_pricing.thought = canonicalPricing.thought; + } - // Add cached pricing if available (Anthropic-specific) + // Add cached pricing if available (legacy support) if (sourceModel.pricing.inputTokens.cachedPrice !== undefined) { if (provider === 'anthropic') { token_pricing.anthropic_cache_read = this.convertPricingToCentsPerMillionTokens( @@ -398,8 +417,6 @@ class ModelCapabilitiesFetcher { sourceModel.pricing.inputTokens.basePrice * 1.25, sourceData.pricingUnit, ); - //} else if (provider === 'google') { - // // [TODO] add support for google tiered pricing } else { token_pricing.cache_read = this.convertPricingToCentsPerMillionTokens( sourceModel.pricing.inputTokens.cachedPrice, @@ -425,6 +442,28 @@ class ModelCapabilitiesFetcher { effectiveDate: effectiveDate, }, featureKey: featureKey, // Add feature key for access control + // NEW: Tiered pricing configuration fields + ...(canonicalPricing.inputTokensTieredConfig && { + inputTokensTieredConfig: canonicalPricing.inputTokensTieredConfig + }), + ...(canonicalPricing.inputTokensCacheTypes && { + inputTokensCacheTypes: canonicalPricing.inputTokensCacheTypes + }), + ...(canonicalPricing.inputTokensContentTypes && { + inputTokensContentTypes: canonicalPricing.inputTokensContentTypes + }), + ...(canonicalPricing.outputTokensTieredConfig && { + outputTokensTieredConfig: canonicalPricing.outputTokensTieredConfig + }), + ...(canonicalPricing.outputTokensCacheTypes && { + outputTokensCacheTypes: canonicalPricing.outputTokensCacheTypes + }), + ...(canonicalPricing.outputTokensContentTypes && { + outputTokensContentTypes: canonicalPricing.outputTokensContentTypes + }), + 
...(canonicalPricing.thoughtTokensConfig && { + thoughtTokensConfig: canonicalPricing.thoughtTokensConfig + }), supportedFeatures: { functionCalling: sourceModel.supportedFeatures.functionCalling, json: sourceModel.supportedFeatures.json, @@ -490,6 +529,131 @@ class ModelCapabilitiesFetcher { // //return Math.round(result * 1e12) / 1e12; // } + /** + * Convert pricing from provider JSON to canonical format + * IMPORTANT: This is the ONLY place where dollars -> cents conversion should happen! + * All downstream code (sync script, edge function, handler) should work with cents values. + */ + private convertPricingToCanonical(providerPricing: any): any { + const canonicalPricing: any = {}; + + // Handle input tokens + if (providerPricing.inputTokens) { + if (providerPricing.inputTokens.tieredPricing) { + // New tiered format - create simple token_pricing entry (base tier price) + canonicalPricing.input = providerPricing.inputTokens.tieredPricing.tiers[0].price * 100; // Convert to cents + + // Store full tiered config for settings field (convert all prices to cents) + canonicalPricing.inputTokensTieredConfig = this.convertTieredPricingToCents(providerPricing.inputTokens.tieredPricing); + } else { + // Legacy/non-tiered format + canonicalPricing.input = providerPricing.inputTokens.basePrice * 100; // Convert to cents + } + + // Handle cache types (regardless of whether tiered pricing is present) + if (providerPricing.inputTokens.cacheTypes) { + canonicalPricing.inputTokensCacheTypes = this.convertCacheTypesToCents(providerPricing.inputTokens.cacheTypes); + } + + // Handle content types (regardless of whether tiered pricing is present) + if (providerPricing.inputTokens.contentTypes) { + canonicalPricing.inputTokensContentTypes = this.convertContentTypesToCents(providerPricing.inputTokens.contentTypes); + } + } + + // Handle output tokens + if (providerPricing.outputTokens) { + if (providerPricing.outputTokens.tieredPricing) { + canonicalPricing.output = providerPricing.outputTokens.tieredPricing.tiers[0].price * 100; + + // Store full tiered config for settings field (convert all prices to cents) + canonicalPricing.outputTokensTieredConfig = this.convertTieredPricingToCents(providerPricing.outputTokens.tieredPricing); + } else { + // Legacy/non-tiered format + canonicalPricing.output = providerPricing.outputTokens.basePrice * 100; + } + + // Handle cache types (regardless of whether tiered pricing is present) + if (providerPricing.outputTokens.cacheTypes) { + canonicalPricing.outputTokensCacheTypes = this.convertCacheTypesToCents(providerPricing.outputTokens.cacheTypes); + } + + // Handle content types (regardless of whether tiered pricing is present) + if (providerPricing.outputTokens.contentTypes) { + canonicalPricing.outputTokensContentTypes = this.convertContentTypesToCents(providerPricing.outputTokens.contentTypes); + } + } + + // Handle thought tokens (standardized terminology) + if (providerPricing.thoughtTokens) { + canonicalPricing.thought = providerPricing.thoughtTokens.basePrice * 100; + canonicalPricing.thoughtTokensConfig = { + basePrice: providerPricing.thoughtTokens.basePrice * 100, // Convert to cents + description: providerPricing.thoughtTokens.description, + tieredPricing: providerPricing.thoughtTokens.tieredPricing ? 
+ this.convertTieredPricingToCents(providerPricing.thoughtTokens.tieredPricing) : undefined, + }; + } + + return canonicalPricing; + } + + /** + * Convert tiered pricing structure from dollars to cents + */ + private convertTieredPricingToCents(tieredPricing: any): any { + return { + ...tieredPricing, + tiers: tieredPricing.tiers.map((tier: any) => ({ + ...tier, + price: tier.price * 100 // Convert to cents + })), + basePrice: tieredPricing.basePrice ? tieredPricing.basePrice * 100 : undefined + }; + } + + /** + * Convert cache types pricing from dollars to cents + */ + private convertCacheTypesToCents(cacheTypes: any): any { + const converted: any = {}; + + for (const [cacheType, config] of Object.entries(cacheTypes)) { + converted[cacheType] = { + ...config as any, + explicitPricing: (config as any).explicitPricing ? { + tiers: (config as any).explicitPricing.tiers.map((tier: any) => ({ + ...tier, + price: tier.price * 100 // Convert to cents + })) + } : undefined + }; + } + + return converted; + } + + /** + * Convert content types pricing from dollars to cents + */ + private convertContentTypesToCents(contentTypes: any): any { + const converted: any = {}; + + for (const [contentType, config] of Object.entries(contentTypes)) { + converted[contentType] = { + ...config as any, + explicitPricing: (config as any).explicitPricing ? { + tiers: (config as any).explicitPricing.tiers.map((tier: any) => ({ + ...tier, + price: tier.price * 100 // Convert to cents + })) + } : undefined + }; + } + + return converted; + } + /** * Convert pricing from source unit to per-million-tokens */ @@ -710,7 +874,7 @@ class ModelCapabilitiesFetcher { try { console.log('🔍 Fetching available models from llm-proxy...'); const config = await fetchSupabaseConfig({ - supabaseConfigUrl: this.config.supabaseUrl, + supabaseConfigUrl: this.config.supabaseConfigUrl, }); const supabaseClient = createClient(config.url, config.anonKey); @@ -805,7 +969,7 @@ class ModelCapabilitiesFetcher { /** * Load environment-specific configuration */ -async function loadEnvironmentConfig(environment: string): Promise<{ authToken?: string; supabaseUrl?: string }> { +async function loadEnvironmentConfig(environment: string): Promise<{ authToken?: string; supabaseConfigUrl?: string }> { const scriptDir = dirname(fromFileUrl(import.meta.url)); const envFile = join(scriptDir, `.env.${environment}`); @@ -827,7 +991,7 @@ async function loadEnvironmentConfig(environment: string): Promise<{ authToken?: console.log(`📁 Loaded environment config from ${envFile}`); return { authToken: config.LLM_PROXY_AUTH_TOKEN, - supabaseUrl: config.SUPABASE_CONFIG_URL, + supabaseConfigUrl: config.SUPABASE_CONFIG_URL, }; } } catch (error) { @@ -848,7 +1012,7 @@ async function main() { // Parse command line arguments const args = parseArgs(Deno.args, { - string: ['output', 'providers', 'source-dir', 'environment', 'supabase-url'], + string: ['output', 'providers', 'source-dir', 'environment', 'supabase-config-url'], boolean: ['validate-only', 'use-api-validation', 'skip-llm-proxy-check'], default: { environment: 'staging', @@ -873,7 +1037,7 @@ async function main() { // Setup configuration const config: FetcherConfig = { environment: args.environment as 'staging' | 'production', - supabaseUrl: args['supabase-url'] || envConfig.supabaseUrl, + supabaseConfigUrl: args['supabase-config-url'] || envConfig.supabaseConfigUrl, outputPath: args.output, sourceDir: args['source-dir'], providersToProcess: args.providers.split(',').map((p) => p.trim()), diff --git 
a/api/src/auth/config.test.ts b/api/src/auth/config.test.ts index 2aaa0976..ef207999 100644 --- a/api/src/auth/config.test.ts +++ b/api/src/auth/config.test.ts @@ -1,6 +1,6 @@ import { assertEquals, assertThrows } from 'testing/asserts.ts'; import { fetchSupabaseConfig, validateSupabaseConfig } from './config.ts'; -import { ConfigFetchError } from '../types/auth.ts'; +import { ConfigFetchError } from 'api/types/auth.ts'; import { ConfigManagerV2 } from '../../../src/shared/config/configManager.ts'; // Mock ConfigManagerV2 diff --git a/api/src/auth/config.ts b/api/src/auth/config.ts index b92c4183..c559a367 100644 --- a/api/src/auth/config.ts +++ b/api/src/auth/config.ts @@ -1,4 +1,4 @@ -import { ConfigFetchError, type SupabaseConfig } from '../types/auth.ts'; +import { ConfigFetchError, type SupabaseConfig } from 'api/types/auth.ts'; import { logger } from 'shared/logger.ts'; import { getConfigManager } from 'shared/config/configManager.ts'; @@ -48,66 +48,73 @@ export function validateSupabaseConfig(config: unknown): config is SupabaseConfi export async function fetchSupabaseConfig( options: { maxRetries?: number; retryDelay?: number; supabaseConfigUrl?: string } = {}, ): Promise { - const configManager = await getConfigManager(); - const globalConfig = await configManager.getGlobalConfig(); - - // Allow override of config URL via options parameter - const configUrl = options.supabaseConfigUrl || - globalConfig.api.supabaseConfigUrl || - 'https://www.beyondbetter.app/api/v1/config/supabase'; - - // return { - // url: globalConfig.bui.supabaseUrl!, - // anonKey: globalConfig.bui.supabaseAnonKey!, - // verifyUrl: new URL('/auth/verify', 'https://localhost:8080').toString(), - // }; - - const { maxRetries = 3, retryDelay = 5000 } = options; - - for (let attempt = 1; attempt <= maxRetries; attempt++) { - try { - if (attempt > 1) { - logger.info( - `AuthConfig: Fetching Supabase config from BUI [${configUrl}] (attempt ${attempt}/${maxRetries})`, + try { + const configManager = await getConfigManager(); + const globalConfig = await configManager.getGlobalConfig(); + + // Allow override of config URL via options parameter + const configUrl = options.supabaseConfigUrl || + globalConfig.api.supabaseConfigUrl || + 'https://www.beyondbetter.app/api/v1/config/supabase'; + //logger.info('AuthConfig: fetching Supabase config from BUI using: ', configUrl); + + // return { + // url: globalConfig.bui.supabaseUrl!, + // anonKey: globalConfig.bui.supabaseAnonKey!, + // verifyUrl: new URL('/auth/verify', 'https://localhost:8080').toString(), + // }; + + const { maxRetries = 3, retryDelay = 5000 } = options; + + for (let attempt = 1; attempt <= maxRetries; attempt++) { + try { + if (attempt > 1) { + logger.info( + `AuthConfig: Fetching Supabase config from BUI [${configUrl}] (attempt ${attempt}/${maxRetries})`, + ); + } + + // //const httpClient: Deno.HttpClient = Deno.createHttpClient({ caCerts: ['system'] }); + // const fetchArgs: undefined | RequestInit & { client: Deno.HttpClient } = + // configUrl.startsWith('https://localhost:8080') + // ? 
{ client: Deno.createHttpClient({ caCerts: ['system'] }) } + // : undefined; + // const response = await fetch(configUrl, fetchArgs); + if (configUrl.startsWith('https://localhost:8080')) Deno.env.set('DENO_TLS_CA_STORE', 'system'); + const response = await fetch(configUrl); + if (!response.ok) { + throw new Error(`HTTP ${response.status}: ${response.statusText}`); + } + + const config = await response.json(); + + // Validate the config + validateSupabaseConfig(config); + + logger.info('AuthConfig: Successfully fetched and validated Supabase config from BUI'); + return config; + } catch (error) { + const message = error instanceof Error ? error.message : 'Unknown error'; + logger.error( + `AuthConfig: Failed to fetch Supabase config (attempt ${attempt}/${maxRetries}):`, + message, ); - } - // //const httpClient: Deno.HttpClient = Deno.createHttpClient({ caCerts: ['system'] }); - // const fetchArgs: undefined | RequestInit & { client: Deno.HttpClient } = - // configUrl.startsWith('https://localhost:8080') - // ? { client: Deno.createHttpClient({ caCerts: ['system'] }) } - // : undefined; - // const response = await fetch(configUrl, fetchArgs); - if (configUrl.startsWith('https://localhost:8080')) Deno.env.set('DENO_TLS_CA_STORE', 'system'); - const response = await fetch(configUrl); - if (!response.ok) { - throw new Error(`HTTP ${response.status}: ${response.statusText}`); - } + if (attempt === maxRetries) { + logger.error('AuthConfig: Max retry attempts reached. API will not start.'); + throw new ConfigFetchError(message, attempt); + } - const config = await response.json(); - - // Validate the config - validateSupabaseConfig(config); - - logger.info('AuthConfig: Successfully fetched and validated Supabase config from BUI'); - return config; - } catch (error) { - const message = error instanceof Error ? error.message : 'Unknown error'; - logger.error( - `AuthConfig: Failed to fetch Supabase config (attempt ${attempt}/${maxRetries}):`, - message, - ); - - if (attempt === maxRetries) { - logger.error('AuthConfig: Max retry attempts reached. API will not start.'); - throw new ConfigFetchError(message, attempt); + logger.info(`AuthConfig: Retrying in ${retryDelay / 1000} seconds...`); + await new Promise((resolve) => setTimeout(resolve, retryDelay)); } - - logger.info(`AuthConfig: Retrying in ${retryDelay / 1000} seconds...`); - await new Promise((resolve) => setTimeout(resolve, retryDelay)); } - } - // This should never be reached due to throw in the loop, but TypeScript needs it - throw new ConfigFetchError('Failed to fetch config (unreachable)', maxRetries); + // This should never be reached due to throw in the loop, but TypeScript needs it + throw new ConfigFetchError('Failed to fetch config (unreachable)', maxRetries); + } catch (error) { + const message = error instanceof Error ? 
error.message : 'Unknown fetch error'; + logger.error('AuthConfig: Supabase config fetch failed:', message); + throw new ConfigFetchError(`Fetch config failed: ${message}`, 0); + } } diff --git a/api/src/auth/index.ts b/api/src/auth/index.ts index 3fe217fa..0f4b722b 100644 --- a/api/src/auth/index.ts +++ b/api/src/auth/index.ts @@ -4,8 +4,8 @@ * Provides Supabase authentication integration using Deno's Storage API */ -export { SessionManager } from './session.ts'; +export { UserAuthSession } from './userAuthSession.ts'; export { fetchSupabaseConfig, validateSupabaseConfig } from './config.ts'; // Re-export types -export type { ConfigFetchError, Session, SessionError, SupabaseConfig } from '../types/auth.ts'; +export type { ConfigFetchError, Session, SessionError, SupabaseConfig } from 'api/types/auth.ts'; diff --git a/api/src/auth/session.test.ts b/api/src/auth/session.test.ts index f950673d..a66c7f08 100644 --- a/api/src/auth/session.test.ts +++ b/api/src/auth/session.test.ts @@ -1,6 +1,6 @@ import { assertEquals, assertRejects, assertThrows } from 'testing/asserts.ts'; -import { SessionManager } from './session.ts'; -import { ConfigFetchError } from '../types/auth.ts'; +import { UserAuthSession } from './userAuthSession.ts'; +import { ConfigFetchError } from 'api/types/auth.ts'; // Mock Supabase client const mockSession = { @@ -48,13 +48,13 @@ globalThis.localStorage = { }, }; -Deno.test('SessionManager', async (t) => { - let manager: SessionManager; +Deno.test('UserAuthSession', async (t) => { + let manager: UserAuthSession; // Setup fresh manager for each test function setupManager() { mockStorage.clear(); - manager = new SessionManager(); + manager = new UserAuthSession(); return { manager }; } @@ -112,7 +112,7 @@ Deno.test('SessionManager', async (t) => { assertThrows( () => manager.getClient(), Error, - 'SessionManager not initialized', + 'UserAuthSession not initialized', ); }); @@ -122,7 +122,7 @@ Deno.test('SessionManager', async (t) => { await assertRejects( () => manager.getSession(), Error, - 'SessionManager not initialized', + 'UserAuthSession not initialized', ); }); @@ -147,7 +147,7 @@ Deno.test('SessionManager', async (t) => { assertThrows( () => manager.getClient(), Error, - 'SessionManager not initialized', + 'UserAuthSession not initialized', ); }); diff --git a/api/src/auth/session.ts b/api/src/auth/session.ts deleted file mode 100644 index edb5fee1..00000000 --- a/api/src/auth/session.ts +++ /dev/null @@ -1,259 +0,0 @@ -import { createClient } from '@supabase/supabase-js'; -import { fetchSupabaseConfig } from './config.ts'; -import { logger } from 'shared/logger.ts'; -import { KVStorage } from 'shared/kvStorage.ts'; -import { ToolsAccess } from 'shared/features.ts'; -import type { Session, SupabaseConfig } from '../types/auth.ts'; -import type { - SupabaseClientAuth, - SupabaseClientBilling, - SupabaseClientCore, - SupabaseClientLlm, - SupabaseClientMarketing, - SupabaseClientWithSchema, -} from 'shared/types/supabase.ts'; - -/** - * Supabase Client Factory - * Creates schema-specific Supabase clients for different use cases - */ -export class SupabaseClientFactory { - private static config: SupabaseConfig | null = null; - private static clientCache = new Map(); - - /** - * Initialize the factory with Supabase configuration - */ - static async initialize(): Promise { - if (!SupabaseClientFactory.config) { - SupabaseClientFactory.config = await fetchSupabaseConfig(); - } - } - - /** - * Create a Supabase client for a specific schema - * @param schema - The database 
schema to use (e.g., 'abi_core', 'public') - * @param useAuth - Whether to include auth configuration (default: false) - * @returns Configured Supabase client - */ - static async createClient< - T extends 'abi_billing' | 'abi_llm' | 'abi_auth' | 'abi_core' | 'abi_marketing' | 'public', - >( - schema: T, - useAuth = false, - ): Promise> { - await SupabaseClientFactory.initialize(); - - if (!SupabaseClientFactory.config) { - throw new Error('SupabaseClientFactory: Configuration not initialized'); - } - - // Use cache key to avoid creating duplicate clients - const cacheKey = `${schema}_${useAuth ? 'auth' : 'noauth'}`; - const cachedClient = SupabaseClientFactory.clientCache.get(cacheKey); - if (cachedClient) { - return cachedClient; - } - - const clientOptions: any = { - db: { schema }, - }; - - // Only add auth configuration if requested (for session management) - if (useAuth) { - const storage = new KVStorage({ - prefix: 'supabase_auth', - filename: 'auth.kv', - }); - await storage.initialize(); - - clientOptions.auth = { - storage, - autoRefreshToken: true, - persistSession: true, - detectSessionInUrl: false, - }; - } - - // Use type assertion to ensure the client is properly typed with the schema - const client = createClient( - SupabaseClientFactory.config.url, - SupabaseClientFactory.config.anonKey, - clientOptions, - ) as SupabaseClientWithSchema; - - // Cache the client - SupabaseClientFactory.clientCache.set(cacheKey, client); - - return client; - } - - /** - * Get the cached configuration - */ - static getConfig(): SupabaseConfig | null { - return SupabaseClientFactory.config; - } - - /** - * Clear the client cache (useful for testing or cleanup) - */ - static clearCache(): void { - SupabaseClientFactory.clientCache.clear(); - } -} - -/** - * Manages Supabase authentication session - * - Initializes Supabase client with Deno KV Storage - * - Handles session refresh - * - Manages single auth session - */ -export class SessionManager { - private supabaseClient: SupabaseClientWithSchema<'public'> | null = null; - private supabaseClientBilling: SupabaseClientWithSchema<'abi_billing'> | null = null; - private supabaseClientCore: SupabaseClientWithSchema<'abi_core'> | null = null; - private config: SupabaseConfig | null = null; - private storage: KVStorage; - - constructor() { - // Initialize KVStorage with auth-specific settings - this.storage = new KVStorage({ - prefix: 'supabase_auth', - filename: 'auth.kv', // Store auth data in separate file - }); - } - - /** - * Initialize the session manager - * - Initializes KV storage - * - Fetches Supabase config - * - Sets up Supabase client with storage - * - Starts auto refresh - */ - async initialize(): Promise { - try { - // Initialize KV storage first - await this.storage.initialize(); - - // Initialize the factory and get auth-enabled client - await SupabaseClientFactory.initialize(); - this.config = SupabaseClientFactory.getConfig(); - this.supabaseClient = await SupabaseClientFactory.createClient('public', true); - this.supabaseClientBilling = await SupabaseClientFactory.createClient('abi_billing', true); - this.supabaseClientCore = await SupabaseClientFactory.createClient('abi_core', true); - - // Enable auto refresh - if (this.supabaseClient) { - await this.supabaseClient.auth.startAutoRefresh(); - } - logger.info('SessionManager: initialized successfully'); - } catch (error) { - logger.error('SessionManager: Failed to initialize SessionManager:', error); - throw error; - } - } - - /** - * Get the current session if any - */ - async 
getSession(): Promise { - if (!this.supabaseClient) { - throw new Error('SessionManager not initialized'); - } - - try { - const { data: { session }, error } = await this.supabaseClient.auth.getSession(); - - if (error) { - throw error; - } - - return session; - } catch (error) { - logger.error('SessionManager: Error getting session:', error); - return null; - } - } - - /** - * Clear the current session - */ - async clearSession(): Promise { - if (!this.supabaseClient) { - throw new Error('SessionManager not initialized'); - } - - try { - await this.supabaseClient.auth.signOut(); - this.storage.clear(); - logger.info('SessionManager: Session cleared successfully'); - } catch (error) { - logger.error('SessionManager: Error clearing session:', error); - throw error; - } - } - - /** - * Check whether user is allowed access with external tools feature - */ - async hasExternalToolsAccess(): Promise { - if (!this.supabaseClientCore || !this.supabaseClientBilling) { - throw new Error('SessionManager not initialized'); - } - - const session = await this.getSession(); - if (!session?.user?.id) { - throw new Error('SessionManager has no valid user in session'); - } - - return await ToolsAccess.hasExternalTools(this.supabaseClientCore, this.supabaseClientBilling, session.user.id); - } - - /** - * Clean up resources - */ - async destroy(): Promise { - if (this.supabaseClient) { - await this.supabaseClient.auth.stopAutoRefresh(); - this.supabaseClient = null; - } - this.storage.clear(); - await this.storage.close(); // Ensure proper cleanup - this.config = null; - } - - /** - * Get the verification URL for email signups - * Throws if not initialized - */ - getVerifyUrl(): string { - if (!this.config) { - throw new Error('SessionManager not initialized'); - } - return this.config.verifyUrl; - } - - /** - * Get the Supabase client instance - * Throws if not initialized - */ - getClient(): SupabaseClientWithSchema<'public'> { - if (!this.supabaseClient) { - throw new Error('SessionManager not initialized'); - } - return this.supabaseClient; - } - getBillingClient(): SupabaseClientWithSchema<'abi_billing'> { - if (!this.supabaseClientBilling) { - throw new Error('SessionManager not initialized'); - } - return this.supabaseClientBilling; - } - getCoreClient(): SupabaseClientWithSchema<'abi_core'> { - if (!this.supabaseClientCore) { - throw new Error('SessionManager not initialized'); - } - return this.supabaseClientCore; - } -} diff --git a/api/src/auth/sessionRegistry.ts b/api/src/auth/sessionRegistry.ts new file mode 100644 index 00000000..adaea086 --- /dev/null +++ b/api/src/auth/sessionRegistry.ts @@ -0,0 +1,348 @@ +import type { UserContext } from 'shared/types/userContext.ts'; +import { UserAuthSession } from 'api/auth/userAuthSession.ts'; +import { logger } from 'shared/logger.ts'; +import { AuthenticationError } from 'api/errors/error.ts'; + +/** + * API token information for request authentication + */ +export interface ApiTokenInfo { + userId: string; + tokenId: string; + scopes: string[]; + expiresAt?: number; + metadata?: Record; +} + +/** + * Session registry that manages multiple user userAuthSessions and API tokens + * Provides thread-safe access to user contexts without passing UserAuthSession around + */ +export class SessionRegistry { + private static instance: SessionRegistry; + private userAuthSessions = new Map(); + private userContexts = new Map(); + private apiTokens = new Map(); + + // Request-scoped context (using AsyncLocalStorage pattern for Deno) + // Or, temporaray solution 
until full multi-user support is implemented + private currentContext: UserContext | null = null; + + private constructor() { + logger.info('SessionRegistry: Initializing session registry'); + } + + static getInstance(): SessionRegistry { + if (!SessionRegistry.instance) { + SessionRegistry.instance = new SessionRegistry(); + } + return SessionRegistry.instance; + } + + // ============================================================================ + // SESSION MANAGEMENT + // ============================================================================ + + /** + * Register a new user session + */ + async registerSession(userId: string): Promise { + logger.info(`SessionRegistry: Registering session for user: ${userId}`); + + let userAuthSession: UserAuthSession | undefined = this.userAuthSessions.get(userId); + if (!userAuthSession) { + logger.info(`SessionRegistry: Creating userAuthSession for user: ${userId}`); + // Create new session manager for this user + userAuthSession = await new UserAuthSession(userId).initialize(); + logger.info(`SessionRegistry: Created userAuthSession for user: ${userId}`); + + // Store session + this.userAuthSessions.set(userId, userAuthSession); + } + + // Create user context + const session = await userAuthSession.getSession(); + //logger.info(`SessionRegistry: Got session for user: ${userId}`, session); + if (!session?.user) { + logger.warn(`SessionRegistry: No session for user: ${userId}`); + // throw new AuthenticationError('Invalid session - no user found', { + // name: 'SessionRegistration', + // userId, + // }); + } + + const userContext: UserContext = { + userId, + user: session?.user || { id: userId, email: '' }, + userAuthSession, + }; + this.userContexts.set(userId, userContext); + + logger.info(`SessionRegistry: Session registered successfully for user: ${userId}`); + return userContext; + } + + /** + * Remove a user session and clean up resources + */ + async removeSession(userId: string): Promise { + logger.info(`SessionRegistry: Removing session for user: ${userId}`); + + const userAuthSession = this.userAuthSessions.get(userId); + if (userAuthSession) { + await userAuthSession.clearSession(); + //await userAuthSession.destroy(); + this.userAuthSessions.delete(userId); + } + + this.userContexts.delete(userId); + + // Remove all API tokens for this user + const tokensToRemove = Array.from(this.apiTokens.entries()) + .filter(([_, tokenInfo]) => tokenInfo.userId === userId) + .map(([tokenId]) => tokenId); + + tokensToRemove.forEach((tokenId) => this.apiTokens.delete(tokenId)); + + logger.info(`SessionRegistry: Session removed for user: ${userId}`); + } + + /** + * Get session manager for a user + */ + getUserAuthSession(userId: string): UserAuthSession | null { + return this.userAuthSessions.get(userId) || null; + } + + /** + * Get user context for a user + */ + getUserContext(userId: string): UserContext | null { + return this.userContexts.get(userId) || null; + } + + /** + * List all active user userAuthSessions + */ + getActiveSessions(): string[] { + return Array.from(this.userAuthSessions.keys()); + } + + // ============================================================================ + // API TOKEN MANAGEMENT + // ============================================================================ + + /** + * Generate an API token for a user + */ + // deno-lint-ignore require-await + async generateApiToken( + userId: string, + scopes: string[] = ['api:read', 'api:write'], + expiresIn?: number, + metadata?: Record, + ): Promise { + // Verify user has 
active session + if (!this.userAuthSessions.has(userId)) { + throw new AuthenticationError('User session not found', { + name: 'TokenGeneration', + userId, + }); + } + + // Generate secure token + const tokenId = crypto.randomUUID(); + const tokenSecret = crypto.randomUUID(); + const token = `bb_${tokenId}_${tokenSecret}`; + + const expiresAt = expiresIn ? Date.now() + (expiresIn * 1000) : undefined; + + const tokenInfo: ApiTokenInfo = { + userId, + tokenId, + scopes, + expiresAt, + metadata, + }; + + this.apiTokens.set(token, tokenInfo); + + logger.info(`SessionRegistry: Generated API token for user: ${userId}, scopes: ${scopes.join(',')}`); + return token; + } + + /** + * Validate an API token and return user context + */ + // deno-lint-ignore require-await + async validateApiToken(token: string): Promise { + if (!token.startsWith('bb_')) { + return null; + } + + const tokenInfo = this.apiTokens.get(token); + if (!tokenInfo) { + return null; + } + + // Check expiration + if (tokenInfo.expiresAt && Date.now() > tokenInfo.expiresAt) { + this.apiTokens.delete(token); + logger.warn(`SessionRegistry: Expired API token removed for user: ${tokenInfo.userId}`); + return null; + } + + const userContext = this.userContexts.get(tokenInfo.userId); + if (!userContext) { + // Clean up orphaned token + this.apiTokens.delete(token); + return null; + } + + return userContext; + } + + /** + * Revoke an API token + */ + revokeApiToken(token: string): boolean { + return this.apiTokens.delete(token); + } + + /** + * Revoke all API tokens for a user + */ + revokeUserTokens(userId: string): number { + const tokensToRevoke = Array.from(this.apiTokens.entries()) + .filter(([_, tokenInfo]) => tokenInfo.userId === userId) + .map(([token]) => token); + + tokensToRevoke.forEach((token) => this.apiTokens.delete(token)); + + logger.info(`SessionRegistry: Revoked ${tokensToRevoke.length} API tokens for user: ${userId}`); + return tokensToRevoke.length; + } + + // ============================================================================ + // CONTEXT MANAGEMENT + // ============================================================================ + + /** + * Set current request context (for use in request middleware) + */ + setCurrentContext(context: UserContext): void { + this.currentContext = context; + } + + /** + * Get current request context + */ + getCurrentContext(): UserContext | null { + return this.currentContext; + } + + /** + * Update user context with project/collaboration info + */ + updateUserContext( + userId: string, + updates: Partial>, + ): void { + const context = this.userContexts.get(userId); + if (context) { + Object.assign(context, updates); + } + } + + /** + * Execute a function with a specific user context + */ + async withUserContext(userId: string, fn: (context: UserContext) => Promise): Promise { + const context = this.userContexts.get(userId); + if (!context) { + throw new AuthenticationError('User context not found', { + name: 'ContextExecution', + userId, + }); + } + + const previousContext = this.currentContext; + this.currentContext = context; + + try { + return await fn(context); + } finally { + this.currentContext = previousContext; + } + } + + /** + * Get current user session manager (convenience method) + */ + getCurrentUserAuthSession(): UserAuthSession | null { + return this.currentContext?.userAuthSession || null; + } + + /** + * Get current user ID (convenience method) + */ + getCurrentUserId(): string | null { + return this.currentContext?.userId || null; + } + + // 
============================================================================ + // CLEANUP + // ============================================================================ + + /** + * Clean up expired tokens and inactive userAuthSessions + */ + // deno-lint-ignore require-await + async cleanup(): Promise { + logger.info('SessionRegistry: Starting cleanup'); + + // Remove expired tokens + const now = Date.now(); + const expiredTokens = Array.from(this.apiTokens.entries()) + .filter(([_, tokenInfo]) => tokenInfo.expiresAt && now > tokenInfo.expiresAt) + .map(([token]) => token); + + expiredTokens.forEach((token) => this.apiTokens.delete(token)); + + if (expiredTokens.length > 0) { + logger.info(`SessionRegistry: Removed ${expiredTokens.length} expired API tokens`); + } + + logger.info('SessionRegistry: Cleanup completed'); + } + + /** + * Shutdown and clean up all userAuthSessions + */ + async shutdown(): Promise { + logger.info('SessionRegistry: Starting shutdown'); + + // Clean up all userAuthSessions + const cleanupPromises = Array.from(this.userAuthSessions.entries()).map(async ([userId, userAuthSession]) => { + try { + await userAuthSession.destroy(); + logger.debug(`SessionRegistry: Cleaned up session for user: ${userId}`); + } catch (error) { + logger.error(`SessionRegistry: Error cleaning up session for user ${userId}:`, error); + } + }); + + await Promise.allSettled(cleanupPromises); + + // Clear all maps + this.userAuthSessions.clear(); + this.apiTokens.clear(); + this.userContexts.clear(); + this.currentContext = null; + + logger.info('SessionRegistry: Shutdown completed'); + } +} + +// Export the SessionRegistry class so consumers can call SessionRegistry.getInstance() +// This avoids circular dependency issues caused by top-level instantiation diff --git a/api/src/auth/supabaseClientFactory.ts b/api/src/auth/supabaseClientFactory.ts new file mode 100644 index 00000000..656daa77 --- /dev/null +++ b/api/src/auth/supabaseClientFactory.ts @@ -0,0 +1,182 @@ +import { createClient } from '@supabase/supabase-js'; +import type { SupabaseClient, SupabaseClientOptions } from '@supabase/supabase-js'; +import { fetchSupabaseConfig } from 'api/auth/config.ts'; +import { logger } from 'shared/logger.ts'; +import { KVAuthStorage } from 'shared/kvAuthStorage.ts'; +import type { SupabaseConfig } from 'api/types/auth.ts'; +import type { + // SupabaseClientAuth, + // SupabaseClientBilling, + // SupabaseClientCore, + // SupabaseClientLlm, + // SupabaseClientMarketing, + SupabaseClientWithSchema, +} from 'shared/types/supabase.ts'; +import type { UserContext } from 'shared/types/app.ts'; + +type BbAbiSchema = 'abi_api' | 'abi_billing' | 'abi_llm' | 'abi_auth' | 'abi_core' | 'abi_marketing' | 'public'; +/** + * Standalone Supabase Client Factory + * Creates schema-specific Supabase clients for different use cases + * + * Usage: + * - clientNoAuth: For operations that don't require user authentication (signup, login, reset password) + * - clientWithAuth: For operations that require user authentication (user updates, authorized API calls) + */ +export class SupabaseClientFactory { + private static config: SupabaseConfig | null = null; + private static clientCache = new Map(); + private static closing = false; + private static isInitialized = false; + + /** + * Initialize the factory with Supabase configuration + */ + static async initialize(): Promise { + if (SupabaseClientFactory.isInitialized) return; + if (!SupabaseClientFactory.config) { + SupabaseClientFactory.config = await fetchSupabaseConfig(); 
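A minimal usage sketch of the SessionRegistry introduced above, assuming only the methods shown in api/src/auth/sessionRegistry.ts in this patch; the userId value, the scope list, and the middleware framing are illustrative, not part of the patch.

```typescript
// Sketch only: registry lifecycle for one user, using the API defined above.
import { SessionRegistry } from 'api/auth/sessionRegistry.ts';

async function demoSessionFlow(userId: string) {
  const registry = SessionRegistry.getInstance();

  // Registers (or reuses) a UserAuthSession and builds a UserContext for the user
  const userContext = await registry.registerSession(userId);

  // Issue a scoped API token; expiresIn is in seconds
  const token = await registry.generateApiToken(userId, ['api:read'], 3600);

  // Later, e.g. in request middleware: resolve the token back to a user context
  const resolved = await registry.validateApiToken(token);
  if (!resolved) throw new Error('Token invalid or expired');

  // Run work with this user's context set as the current request context
  await registry.withUserContext(userId, async (ctx) => {
    console.log(`Acting as ${ctx.userId}`);
  });

  // Cleanup: revoke remaining tokens and tear down the session
  registry.revokeUserTokens(userId);
  await registry.removeSession(userId);

  return userContext;
}
```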
+ } + SupabaseClientFactory.isInitialized = true; + } + + /** + * Create a Supabase client for a specific schema + * @param schema - The database schema to use (e.g., 'abi_core', 'public') + * @param useAuth - Whether to include auth configuration (default: false) + * @returns Configured Supabase client + */ + static async createClient< + T extends BbAbiSchema, + >( + schema: T, + useAuth = false, + userContext?: UserContext | null, // login needs client with storage, but no userConext yet + ): Promise> { + await SupabaseClientFactory.initialize(); + + if (!SupabaseClientFactory.config) { + throw new Error('SupabaseClientFactory: Configuration not initialized'); + } + + // Use cache key to avoid creating duplicate clients + const cacheKey = `${schema}_${useAuth ? 'auth' : 'noauth'}`; + const cachedClient = SupabaseClientFactory.clientCache.get(cacheKey); + if (cachedClient) { + return cachedClient; + } + + const clientOptions: SupabaseClientOptions = { + db: { schema }, + }; + + // Only add auth configuration if requested + if (useAuth) { + const storage = await KVAuthStorage.getStorage(userContext?.userId || ''); // we should allow null to indicate no key/prefix yet, so new entries can be added + + clientOptions.auth = { + storage, + autoRefreshToken: true, + persistSession: true, + detectSessionInUrl: false, + }; + } + + // Use type assertion to ensure the client is properly typed with the schema + const client = createClient( + SupabaseClientFactory.config.url, + SupabaseClientFactory.config.anonKey, + clientOptions, + ) as SupabaseClientWithSchema; + + // Cache the client + SupabaseClientFactory.clientCache.set(cacheKey, client); + + return client; + } + + /** + * Create a client without authentication (for signup, login, reset password operations) + * @param schema - The database schema to use (typically 'public' for auth operations) + * @returns Non-authenticated Supabase client + */ + static async createClientNoAuth< + T extends BbAbiSchema, + >( + schema: T, + ): Promise> { + logger.debug(`SupabaseClientFactory: Creating clientNoAuth for schema: ${schema}`); + return await SupabaseClientFactory.createClient(schema, false); + } + + /** + * Create a client with authentication (for operations requiring user context) + * @param schema - The database schema to use + * @returns Authenticated Supabase client + */ + static async createClientWithAuth< + T extends BbAbiSchema, + >( + schema: T, + userContext: UserContext | null, // login needs client with storage, but no userConext yet + ): Promise> { + logger.debug(`SupabaseClientFactory: Creating clientWithAuth for schema: ${schema}`); + return await SupabaseClientFactory.createClient(schema, true, userContext); + } + + static async getCoreClient( + userContext: UserContext, + ): Promise> { + return await SupabaseClientFactory.createClientWithAuth('abi_core', userContext); + } + static async getAuthClient( + userContext: UserContext, + ): Promise> { + return await SupabaseClientFactory.createClientWithAuth('abi_auth', userContext); + } + static async getBillingClient( + userContext: UserContext, + ): Promise> { + return await SupabaseClientFactory.createClientWithAuth('abi_billing', userContext); + } + static async getClient( + userContext: UserContext | null, // login needs client with storage, but no userConext yet + ): Promise> { + return await SupabaseClientFactory.createClientWithAuth('public', userContext); + } + static async getClientNoAuth(): Promise> { + return await SupabaseClientFactory.createClientNoAuth('public'); + } + + /** 
+ * Get the cached configuration + */ + static getConfig(): SupabaseConfig | null { + return SupabaseClientFactory.config; + } + + /** + * Close Supabase clients and clear caches (called during API shutdown) + * Note: Auth storage is managed by KVAuthStorage + */ + // deno-lint-ignore require-await + static async close(): Promise { + if (SupabaseClientFactory.closing) return; + SupabaseClientFactory.closing = true; + + try { + // Clear client cache on shutdown + SupabaseClientFactory.clientCache.clear(); + logger.debug('SupabaseClientFactory: Client cache cleared'); + } finally { + SupabaseClientFactory.closing = false; + } + } + + /** + * Clear the client cache (useful for testing or cleanup) + */ + static clearCache(): void { + SupabaseClientFactory.clientCache.clear(); + } +} diff --git a/api/src/auth/test_storage_read.ts b/api/src/auth/test_storage_read.ts index 312f3adf..1094b73b 100644 --- a/api/src/auth/test_storage_read.ts +++ b/api/src/auth/test_storage_read.ts @@ -1,4 +1,4 @@ -import { KVStorage } from '../../shared/storage/kvStorage.ts'; +import { KVStorage } from 'shared/kvStorage.ts'; const key = 'test_storage'; diff --git a/api/src/auth/test_storage_write.ts b/api/src/auth/test_storage_write.ts index 67fa3f40..067e99cf 100644 --- a/api/src/auth/test_storage_write.ts +++ b/api/src/auth/test_storage_write.ts @@ -1,4 +1,4 @@ -import { KVStorage } from '../../shared/storage/kvStorage.ts'; +import { KVStorage } from 'shared/kvStorage.ts'; import { delay } from '@std/async'; const timestamp = new Date().toISOString(); diff --git a/api/src/auth/userAuthSession.ts b/api/src/auth/userAuthSession.ts new file mode 100644 index 00000000..db1aeb18 --- /dev/null +++ b/api/src/auth/userAuthSession.ts @@ -0,0 +1,195 @@ +import { logger } from 'shared/logger.ts'; +import type { Session, SupabaseConfig } from 'api/types/auth.ts'; +import type { SupabaseClientWithSchema } from 'shared/types/supabase.ts'; +import { SupabaseClientFactory } from 'api/auth/supabaseClientFactory.ts'; +import { KVAuthStorage } from 'shared/kvAuthStorage.ts'; +import type { UserContext } from 'shared/types/userContext.ts'; + +/** + * Manages Supabase authentication session + * - Initializes Supabase client with Deno KV Storage + * - Handles session refresh + * - Manages single auth session + */ +export class UserAuthSession { + private supabaseClient: SupabaseClientWithSchema<'public'> | null = null; + private supabaseClientAuth: SupabaseClientWithSchema<'abi_auth'> | null = null; + private supabaseClientBilling: SupabaseClientWithSchema<'abi_billing'> | null = null; + private supabaseClientCore: SupabaseClientWithSchema<'abi_core'> | null = null; + private config: SupabaseConfig | null = null; + + constructor(private userId: string) {} + + /** + * Initialize the session manager + * - Uses shared KVStorage from SupabaseClientFactory + * - Fetches Supabase config + * - Sets up Supabase client with shared storage + * - Starts auto refresh + */ + async initialize(): Promise { + try { + // Initialize the factory (which handles shared KVStorage) + await SupabaseClientFactory.initialize(); + this.config = SupabaseClientFactory.getConfig(); + + const userContext: UserContext = { + userId: this.userId, + user: { + id: this.userId, + email: '', + }, + userAuthSession: this, + }; + + this.supabaseClient = await SupabaseClientFactory.getClient(userContext); + this.supabaseClientAuth = await SupabaseClientFactory.getAuthClient(userContext); + this.supabaseClientBilling = await SupabaseClientFactory.getBillingClient(userContext); 
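A short sketch of the factory's two intended entry points (no-auth vs auth-enabled, schema-scoped clients), assuming the convenience helpers defined in api/src/auth/supabaseClientFactory.ts above; the `invoices` table name and the caller are hypothetical.

```typescript
// Illustrative only: obtaining schema-scoped Supabase clients outside UserAuthSession.
import { SupabaseClientFactory } from 'api/auth/supabaseClientFactory.ts';
import type { UserContext } from 'shared/types/app.ts';

async function listInvoices(userContext: UserContext) {
  // No-auth client: suitable for signup/login/reset-password style operations
  const anonClient = await SupabaseClientFactory.getClientNoAuth();

  // Auth-enabled, schema-scoped client; cached per `${schema}_${auth}` key
  const billing = await SupabaseClientFactory.getBillingClient(userContext);

  // Hypothetical table in the abi_billing schema
  const { data, error } = await billing.from('invoices').select('*').limit(10);
  if (error) throw error;

  return { anonClient, invoices: data };
}
```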
+ this.supabaseClientCore = await SupabaseClientFactory.getCoreClient(userContext); + logger.info('UserAuthSession: created supabase clients'); + + // Enable auto refresh + if (this.supabaseClient) { + await this.supabaseClient.auth.startAutoRefresh(); + } + logger.info('UserAuthSession: initialized successfully'); + + return this; + } catch (error) { + logger.error('UserAuthSession: Failed to initialize UserAuthSession:', error); + throw error; + } + } + + /** + * Get the current session if any + */ + async getSession(): Promise { + if (!this.supabaseClient) { + throw new Error('UserAuthSession not initialized'); + } + + try { + const { data: { session }, error } = await this.supabaseClient.auth.getSession(); + + if (error) { + throw error; + } + + return session; + } catch (error) { + logger.error('UserAuthSession: Error getting session:', error); + return null; + } + } + + /** + * Clear the current session + */ + async clearSession(): Promise { + if (!this.supabaseClient) { + throw new Error('UserAuthSession not initialized'); + } + + try { + await this.supabaseClient.auth.signOut(); + // Clear shared auth storage + const storage = await KVAuthStorage.getStorage(this.userId); + storage.clear(); + logger.info('UserAuthSession: Session cleared successfully'); + } catch (error) { + logger.error('UserAuthSession: Error clearing session:', error); + throw error; + } + } + + /** + * Clean up resources + * Note: Shared KVAuthStorage is managed centrally, not closed here + */ + async destroy(): Promise { + if (this.supabaseClient) { + await this.supabaseClient.auth.stopAutoRefresh(); + this.supabaseClient = null; + } + this.config = null; + } + + /** + * Get the verification URL for email signups + * Throws if not initialized + */ + getVerifyUrl(): string { + if (!this.config) { + throw new Error('UserAuthSession not initialized'); + } + return this.config.verifyUrl; + } + + /** + * Get the Supabase client instance + * @deprecated Use SupabaseClientFactory.createClient() instead + * Throws if not initialized + */ + getClient(): SupabaseClientWithSchema<'public'> { + logger.warn( + 'UserAuthSession: 🚨 DEPRECATED: userAuthSession.getClient() called - migrate to SupabaseClientFactory.createClient()', + ); + logger.warn( + 'UserAuthSession: 🚨 Called from:', + new Error().stack?.split('\n')[2]?.trim() || 'unknown location', + ); + if (!this.supabaseClient) { + throw new Error('UserAuthSession not initialized'); + } + return this.supabaseClient; + } + /** + * @deprecated Use SupabaseClientFactory.createClient('abi_auth') instead + */ + getAuthClient(): SupabaseClientWithSchema<'abi_auth'> { + logger.warn( + "UserAuthSession: 🚨 DEPRECATED: userAuthSession.getAuthClient() called - migrate to SupabaseClientFactory.createClient('abi_auth')", + ); + logger.warn( + 'UserAuthSession: 🚨 Called from:', + new Error().stack?.split('\n')[2]?.trim() || 'unknown location', + ); + if (!this.supabaseClientAuth) { + throw new Error('UserAuthSession not initialized'); + } + return this.supabaseClientAuth; + } + /** + * @deprecated Use SupabaseClientFactory.createClient('abi_billing') instead + */ + getBillingClient(): SupabaseClientWithSchema<'abi_billing'> { + logger.warn( + "UserAuthSession: 🚨 DEPRECATED: userAuthSession.getBillingClient() called - migrate to SupabaseClientFactory.createClient('abi_billing')", + ); + logger.warn( + 'UserAuthSession: 🚨 Called from:', + new Error().stack?.split('\n')[2]?.trim() || 'unknown location', + ); + if (!this.supabaseClientBilling) { + throw new Error('UserAuthSession not 
initialized'); + } + return this.supabaseClientBilling; + } + /** + * @deprecated Use SupabaseClientFactory.createClient('abi_core') instead + */ + getCoreClient(): SupabaseClientWithSchema<'abi_core'> { + logger.warn( + "UserAuthSession: 🚨 DEPRECATED: userAuthSession.getCoreClient() called - migrate to SupabaseClientFactory.createClient('abi_core')", + ); + logger.warn( + 'UserAuthSession: 🚨 Called from:', + new Error().stack?.split('\n')[2]?.trim() || 'unknown location', + ); + if (!this.supabaseClientCore) { + throw new Error('UserAuthSession not initialized'); + } + return this.supabaseClientCore; + } +} diff --git a/api/src/controllers/agentController.ts b/api/src/controllers/agentController.ts index c1b4f841..8bca03a2 100644 --- a/api/src/controllers/agentController.ts +++ b/api/src/controllers/agentController.ts @@ -97,7 +97,7 @@ class AgentController extends BaseController { logger.info( `AgentController: createAgentInteraction - creating interaction for: ${agentInteractionId} with parent ${this.orchestratorInteractionId}`, ); - const interactionModel = this.projectConfig.defaultModels?.agent ?? 'claude-sonnet-4-20250514'; + const interactionModel = this.projectConfig.defaultModels?.agent ?? 'claude-sonnet-4-5-20250929'; const agentInteraction = await this.interactionManager.createInteraction( collaboration, 'conversation', diff --git a/api/src/controllers/baseController.ts b/api/src/controllers/baseController.ts index 2c728228..f0fdc781 100644 --- a/api/src/controllers/baseController.ts +++ b/api/src/controllers/baseController.ts @@ -1,7 +1,6 @@ import * as diff from 'diff'; -import type InteractionManager from 'api/llms/interactionManager.ts'; -import { interactionManager } from 'api/llms/interactionManager.ts'; +import { InteractionManager } from 'api/llms/interactionManager.ts'; import type CollaborationManager from 'api/collaborations/collaborationManager.ts'; import { collaborationManager } from 'api/collaborations/collaborationManager.ts'; import Collaboration from 'api/collaborations/collaboration.ts'; @@ -84,13 +83,13 @@ class BaseController { this._controllerType = 'base'; this.projectEditorRef = new WeakRef(projectEditor); this.collaborationManager = collaborationManager; - this.interactionManager = interactionManager; + this.interactionManager = InteractionManager.getInstance(); } async init(): Promise { const configManager = await getConfigManager(); this.projectConfig = await configManager.getProjectConfig(this.projectEditor.projectId); - this.toolManager = await new LLMToolManager(this.projectConfig, this.projectEditor.sessionManager, 'core') + this.toolManager = await new LLMToolManager(this.projectConfig, this.projectEditor.userContext, 'core') .init(); this.eventManager = EventManager.getInstance(); this.promptManager = await new PromptManager().init(this.projectEditor.projectId); @@ -390,7 +389,7 @@ class BaseController { interactionId: InteractionId, ): Promise { logger.info(`BaseController: Creating new interaction: ${interactionId}`); - const interactionModel = this.projectConfig.defaultModels?.orchestrator ?? 'claude-sonnet-4-20250514'; + const interactionModel = this.projectConfig.defaultModels?.orchestrator ?? 'claude-sonnet-4-5-20250929'; const interaction = await this.interactionManager.createInteraction( collaboration, 'conversation', @@ -508,6 +507,31 @@ class BaseController { } } + /** + * Emergency save for interaction state when no currentResponse is available. + * Used in error scenarios to preserve interaction state even when processing fails. 
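 *
 * A hypothetical call-site sketch (not part of this patch; the handler name and
 * arguments are illustrative): invoke it from a catch block so partial
 * interaction state survives a failed statement.
 * @example
 * try {
 *   await this.handleStatement(collaboration, interaction, statement);
 * } catch (error) {
 *   // Preserves messages, token usage and tool metrics; logs but never re-throws
 *   await this.saveInteractionEmergency(interaction);
 *   throw error;
 * }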
+ */ + async saveInteractionEmergency( + interaction: LLMConversationInteraction, + ): Promise { + try { + const persistence = await new InteractionPersistence( + interaction.collaboration.id, + interaction.id, + this.projectEditor, + ).init(); + + // Save interaction state without requiring currentResponse + // This preserves messages, token usage, tool metrics, etc. + await persistence.saveInteraction(interaction); + + logger.info(`BaseController: Emergency save completed for interaction: ${interaction.id}`); + } catch (error) { + logger.error(`BaseController: Error in emergency save for interaction ${interaction.id}:`, error); + // Don't re-throw - we don't want to mask the original error + } + } + async deleteInteraction(collaborationId: CollaborationId, interactionId: InteractionId): Promise { logger.info(`BaseController: Deleting interaction: ${interactionId}`); diff --git a/api/src/controllers/orchestratorController.ts b/api/src/controllers/orchestratorController.ts index e8ebc194..f0c1ac92 100644 --- a/api/src/controllers/orchestratorController.ts +++ b/api/src/controllers/orchestratorController.ts @@ -3,7 +3,7 @@ import { exists } from '@std/fs'; import type ProjectEditor from 'api/editor/projectEditor.ts'; import type { ProjectInfo } from 'api/editor/projectEditor.ts'; //import type InteractionManager from 'api/llms/interactionManager.ts'; -//import { interactionManager } from 'api/llms/interactionManager.ts'; +//import { InteractionManager } from 'api/llms/interactionManager.ts'; //import type ProjectEditor from 'api/editor/projectEditor.ts'; import type { LLMAnswerToolUse } from 'api/llms/llmMessage.ts'; //import type LLMTool from 'api/llms/llmTool.ts'; @@ -37,6 +37,8 @@ import type { //TokenUsage, //TokenUsageStatsForCollaboration, } from 'shared/types.ts'; +import type { SamplingCreateMessageParams, SamplingMessage } from 'api/types/mcp.ts'; +import type { LLMMessageContentPart, LLMMessageContentParts } from 'api/llms/llmMessage.ts'; import { ApiStatus } from 'shared/types.ts'; //import { ErrorType, isLLMError, type LLMError, type LLMErrorOptions } from 'api/errors/error.ts'; import { isLLMError } from 'api/errors/error.ts'; @@ -730,6 +732,21 @@ class OrchestratorController extends BaseController { (error as Error).message }`, ); + + // Emergency save on turn-level errors to preserve partial progress + try { + await this.saveInteractionEmergency(interaction); + logger.debug( + `OrchestratorController: Emergency save completed after turn ${loopTurnCount} error`, + ); + } catch (saveError) { + logger.warn( + `OrchestratorController: Emergency save failed after turn error: ${ + (saveError as Error).message + }`, + ); + } + if (loopTurnCount === maxTurns - 1) { throw error; // If it's the last turn, throw the error to be caught by the outer try-catch } @@ -871,6 +888,23 @@ class OrchestratorController extends BaseController { logger.error( `OrchestratorController: Error in handle statement: ${(error as Error).message}`, ); + + // CRITICAL: Emergency save to preserve interaction state despite the error + // This ensures messages, token usage, tool metrics, etc. 
are not lost + try { + collaboration.updateLastInteraction(interaction); + await this.saveCollaboration(collaboration); + await this.saveInteractionEmergency(interaction); + logger.info(`OrchestratorController: Emergency save completed for interaction ${interaction.id}`); + } catch (saveError) { + logger.error( + `OrchestratorController: Emergency save failed for interaction ${interaction.id}: ${ + (saveError as Error).message + }`, + ); + // Continue - don't let save errors mask the original error + } + const statementAnswer: CollaborationResponse = { logEntry: { entryType: 'answer', content: 'Error handling statement', thinking: '' }, collaborationId: collaborationId, @@ -972,6 +1006,114 @@ class OrchestratorController extends BaseController { // // logger.info('OrchestratorController: Delegated tasks completed', { results }); } + + /** + * Generate response for MCP sampling request + * Similar to generateCollaborationTitle but for sampling requests + */ + async generateSamplingResponse( + params: SamplingCreateMessageParams, + collaboration: Collaboration, + interactionId: InteractionId, + mcpServerDetails: { name?: string; id: string }, + speakOptions?: LLMSpeakWithOptions, + ): Promise { + const serverName = mcpServerDetails.name || mcpServerDetails.id; + const chatTitle = `Sampling request for ${serverName}`; + + // Create chat interaction for sampling + const chatInteraction = await this.createChatInteraction( + collaboration, + interactionId, + chatTitle, + ); + + // Convert SamplingMessage[] to BB format and handle system messages + const systemMessages: string[] = []; + const conversationMessages: { role: 'user' | 'assistant'; content: LLMMessageContentParts }[] = []; + + // Convert MCP messages to BB format + for (const samplingMsg of params.messages) { + if (samplingMsg.role === 'system') { + // Collect system messages separately + if (samplingMsg.content.type === 'text') { + systemMessages.push(samplingMsg.content.text); + } + } else if (samplingMsg.role === 'user' || samplingMsg.role === 'assistant') { + // Convert content to BB format + const bbContent = this.convertSamplingContentToBB(samplingMsg.content); + conversationMessages.push({ + role: samplingMsg.role, + content: [bbContent], + }); + } + } + + // Prepare speakOptions with system prompt + const finalSpeakOptions: LLMSpeakWithOptions = { + ...speakOptions, + system: [ + ...(params.systemPrompt ? [params.systemPrompt] : []), + ...systemMessages, + ...(speakOptions?.system ? [speakOptions.system] : []), + ].join('\n\n').trim() || undefined, + }; + + // Backfill all messages except the last one + for (let i = 0; i < conversationMessages.length - 1; i++) { + const msg = conversationMessages[i]; + if (msg.role === 'user') { + chatInteraction.addMessageForUserRole(msg.content); + } else if (msg.role === 'assistant') { + chatInteraction.addMessageForAssistantRole(msg.content); + } + } + + // Process the final message with chat() + const lastMessage = conversationMessages[conversationMessages.length - 1]; + if (!lastMessage) { + throw new Error('No messages provided for sampling request'); + } + + // Call chat with the final message content (supporting multi-modal) + const finalPrompt = lastMessage.content.length === 1 ? 
lastMessage.content[0] : lastMessage.content; + const response = await chatInteraction.chat(finalPrompt, finalSpeakOptions); + + return response; + } + + /** + * Convert SamplingContent to BB LLMMessageContentPart format + * TODO: Switch to storeResourceRevision and use resource placeholder text for images/audio + */ + private convertSamplingContentToBB(content: any): LLMMessageContentPart { + switch (content.type) { + case 'text': + return { + type: 'text', + text: content.text, + }; + case 'image': + // TODO: Use storeResourceRevision and resource placeholder + return { + type: 'image', + source: { + type: 'base64', + data: content.data, + media_type: content.mimeType as any, + }, + }; + case 'audio': + // TODO: Use storeResourceRevision and resource placeholder + // For now, convert to text representation since audio isn't directly supported + return { + type: 'text', + text: `[Audio content: ${content.mimeType}, ${content.data.length} bytes]`, + }; + default: + throw new Error(`Unsupported sampling content type: ${content.type}`); + } + } } export default OrchestratorController; diff --git a/api/src/data/modelCapabilities.json b/api/src/data/modelCapabilities.json index fafbd9bf..55265906 100644 --- a/api/src/data/modelCapabilities.json +++ b/api/src/data/modelCapabilities.json @@ -1,10 +1,10 @@ { "_metadata": { - "generatedAt": "2025-07-13T07:12:27.729Z", + "generatedAt": "2025-09-30T02:43:30.131Z", "generatedBy": "update_model_capabilities.ts", "sourceFiles": "api/src/data/model_sources/*.json", "totalProviders": 6, - "totalModels": 36, + "totalModels": 41, "notes": "Pricing converted to per-token format. All data based on comprehensive research of official provider documentation as of June 2025." }, "anthropic": { @@ -14,15 +14,54 @@ "maxOutputTokens": 8192, "token_pricing": { "input": 80, - "output": 400, - "anthropic_cache_read": 8, - "anthropic_cache_write_5min": 100 + "output": 400 }, "pricing_metadata": { "currency": "USD", "effectiveDate": "2024-10-22" }, "featureKey": "models.claude.haiku", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 8 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 100 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 160 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -52,18 +91,57 @@ "claude-3-5-sonnet-20241022": { "displayName": "Claude Sonnet 3.5", "contextWindow": 200000, - "maxOutputTokens": 128000, + "maxOutputTokens": 8192, "token_pricing": { "input": 300, - "output": 1500, - "anthropic_cache_read": 30, - "anthropic_cache_write_5min": 375 + "output": 1500 }, "pricing_metadata": { "currency": "USD", "effectiveDate": "2024-10-22" }, "featureKey": "models.claude.sonnet", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 375 + } + 
] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 600 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -96,15 +174,54 @@ "maxOutputTokens": 128000, "token_pricing": { "input": 300, - "output": 1500, - "anthropic_cache_read": 30, - "anthropic_cache_write_5min": 375 + "output": 1500 }, "pricing_metadata": { "currency": "USD", "effectiveDate": "2025-02-19" }, "featureKey": "models.claude.sonnet", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 375 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 600 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -137,15 +254,54 @@ "maxOutputTokens": 4096, "token_pricing": { "input": 25, - "output": 125, - "anthropic_cache_read": 3, - "anthropic_cache_write_5min": 31.25 + "output": 125 }, "pricing_metadata": { "currency": "USD", "effectiveDate": "2024-03-07" }, "featureKey": "models.claude.haiku", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.12, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 3 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 50 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -178,15 +334,54 @@ "maxOutputTokens": 4096, "token_pricing": { "input": 1500, - "output": 7500, - "anthropic_cache_read": 150, - "anthropic_cache_write_5min": 1875 + "output": 7500 }, "pricing_metadata": { "currency": "USD", "effectiveDate": "2024-02-29" }, "featureKey": "models.claude.opus", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 150 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 1875 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 3000 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -213,21 +408,60 @@ "trainingCutoff": "2023-08-01", "releaseDate": "2024-02-29" }, - "claude-opus-4-20250514": { - "displayName": "Claude Opus 4.0", + "claude-opus-4-1-20250805": { + "displayName": "Claude Opus 4.1", "contextWindow": 200000, - "maxOutputTokens": 128000, + "maxOutputTokens": 32000, "token_pricing": { 
"input": 1500, - "output": 7500, - "anthropic_cache_read": 150, - "anthropic_cache_write_5min": 1875 + "output": 7500 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-05-23" + "effectiveDate": "2025-08-05" }, "featureKey": "models.claude.opus", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 150 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 1875 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 3000 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -252,23 +486,200 @@ "cost": "very-high", "intelligence": "very-high", "trainingCutoff": "2025-03-01", + "releaseDate": "2025-08-05" + }, + "claude-opus-4-20250514": { + "displayName": "Claude Opus 4.0", + "contextWindow": 200000, + "maxOutputTokens": 32000, + "token_pricing": { + "input": 1500, + "output": 7500 + }, + "pricing_metadata": { + "currency": "USD", + "effectiveDate": "2025-05-14" + }, + "featureKey": "models.claude.opus", + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 150 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 1875 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 3000 + } + ] + } + } + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 16384, + "extendedThinking": false + }, + "constraints": { + "temperature": { + "min": 0, + "max": 1 + } + }, + "systemPromptBehavior": "optional", + "responseSpeed": "medium", + "cost": "medium", + "intelligence": "high", + "trainingCutoff": "2025-03-01", "releaseDate": "2025-05-23" }, "claude-sonnet-4-20250514": { - "displayName": "Claude Sonnet 4.0", - "contextWindow": 200000, - "maxOutputTokens": 64000, + "displayName": "Claude Sonnet 4", + "contextWindow": 1000000, + "maxOutputTokens": 128000, "token_pricing": { "input": 300, - "output": 1500, - "anthropic_cache_read": 30, - "anthropic_cache_write_5min": 375 + "output": 1500 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-05-23" + "effectiveDate": "2025-05-14" }, "featureKey": "models.claude.sonnet", + "inputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 200000 + }, + "price": 300 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 200000, + "max": null + }, + "price": 600 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": true, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + }, + { + 
"tier": 1, + "price": 60 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": true, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 375 + }, + { + "tier": 1, + "price": 750 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": true, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 600 + }, + { + "tier": 1, + "price": 1200 + } + ] + } + } + }, + "outputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 200000 + }, + "price": 1500 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 200000, + "max": null + }, + "price": 2250 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -289,11 +700,149 @@ } }, "systemPromptBehavior": "optional", - "responseSpeed": "medium", + "responseSpeed": "fast", "cost": "medium", "intelligence": "high", "trainingCutoff": "2025-03-01", - "releaseDate": "2025-05-23" + "releaseDate": "2025-05-14" + }, + "claude-sonnet-4-5-20250929": { + "displayName": "Claude Sonnet 4.5", + "contextWindow": 1000000, + "maxOutputTokens": 128000, + "token_pricing": { + "input": 300, + "output": 1500 + }, + "pricing_metadata": { + "currency": "USD", + "effectiveDate": "2025-05-14" + }, + "featureKey": "models.claude.sonnet", + "inputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 200000 + }, + "price": 300 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 200000, + "max": null + }, + "price": 600 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "inputTokensCacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": true, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + }, + { + "tier": 1, + "price": 60 + } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": true, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 375 + }, + { + "tier": 1, + "price": 750 + } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": true, + "multiplier": 2, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 600 + }, + { + "tier": 1, + "price": 1200 + } + ] + } + } + }, + "outputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 200000 + }, + "price": 1500 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 200000, + "max": null + }, + "price": 2250 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 16384, + "extendedThinking": false + }, + "constraints": { + "temperature": { + "min": 0, + "max": 1 + } + }, + "systemPromptBehavior": "optional", + "responseSpeed": "fast", + "cost": "medium", + "intelligence": "high", + "trainingCutoff": "2025-03-01", + "releaseDate": "2025-05-14" } }, "deepseek": { @@ -302,8 +851,8 @@ "contextWindow": 64000, "maxOutputTokens": 32000, "token_pricing": { - "input": 14, - "output": 28 + "input": 14.000000000000002, + "output": 28.000000000000004 }, 
"pricing_metadata": { "currency": "USD", @@ -341,7 +890,7 @@ "contextWindow": 64000, "maxOutputTokens": 32000, "token_pricing": { - "input": 55, + "input": 55.00000000000001, "output": 219 }, "pricing_metadata": { @@ -382,15 +931,79 @@ "contextWindow": 1048576, "maxOutputTokens": 8192, "token_pricing": { - "input": 15, - "output": 60, - "cache_read": 3.75 + "input": 7.5, + "output": 30 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2024-09-01" + "effectiveDate": "2025-08-11" }, "featureKey": "models.gemini", + "inputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 128000 + }, + "price": 7.5 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 128000, + "max": null + }, + "price": 15 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 1.875 + }, + { + "tier": 1, + "price": 3.75 + } + ] + } + } + }, + "outputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 128000 + }, + "price": 30 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 128000, + "max": null + }, + "price": 60 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -423,14 +1036,78 @@ "maxOutputTokens": 8192, "token_pricing": { "input": 3.75, - "output": 15, - "cache_read": 2 + "output": 15 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2024-10-01" + "effectiveDate": "2025-08-11" }, "featureKey": "models.gemini", + "inputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 128000 + }, + "price": 3.75 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 128000, + "max": null + }, + "price": 7.5 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.27, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 1 + }, + { + "tier": 1, + "price": 2 + } + ] + } + } + }, + "outputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 128000 + }, + "price": 15 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 128000, + "max": null + }, + "price": 30 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -462,15 +1139,79 @@ "contextWindow": 2097152, "maxOutputTokens": 8192, "token_pricing": { - "input": 250, - "output": 1000, - "cache_read": 62.5 + "input": 125, + "output": 500 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2024-09-01" + "effectiveDate": "2025-08-11" }, "featureKey": "models.gemini", + "inputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 128000 + }, + "price": 125 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 128000, + "max": null + }, + "price": 250 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 31.25 + }, + { + 
"tier": 1, + "price": 62.5 + } + ] + } + } + }, + "outputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 128000 + }, + "price": 500 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 128000, + "max": null + }, + "price": 1000 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -503,14 +1244,50 @@ "maxOutputTokens": 8192, "token_pricing": { "input": 10, - "output": 40, - "cache_read": 2.5 + "output": 40 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-02-01" + "effectiveDate": "2025-08-11" }, "featureKey": "models.gemini", + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 2.5 + } + ] + } + } + }, + "inputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "image": { + "multiplier": 1 + }, + "video": { + "multiplier": 1 + }, + "audio": { + "multiplier": 7, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 70 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -542,14 +1319,51 @@ "contextWindow": 1048576, "maxOutputTokens": 8192, "token_pricing": { - "input": 7.5, - "output": 30 + "input": 10, + "output": 40 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-02-01" + "effectiveDate": "2025-08-11" }, "featureKey": "models.gemini", + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 2.5 + } + ] + } + } + }, + "inputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "image": { + "multiplier": 1 + }, + "video": { + "multiplier": 1 + }, + "audio": { + "multiplier": 3, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -581,15 +1395,51 @@ "contextWindow": 1048576, "maxOutputTokens": 65536, "token_pricing": { - "input": 15, - "output": 350, - "cache_read": 3.75 + "input": 30, + "output": 250 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-05-01" + "effectiveDate": "2025-08-11" }, "featureKey": "models.gemini", + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 7.5 + } + ] + } + } + }, + "inputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "image": { + "multiplier": 1 + }, + "video": { + "multiplier": 1 + }, + "audio": { + "multiplier": 3.33, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 100 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -621,15 +1471,79 @@ "contextWindow": 1048576, "maxOutputTokens": 65536, "token_pricing": { - "input": 250, - "output": 1500, - "cache_read": 62.5 + "input": 125, + "output": 1000 }, "pricing_metadata": { "currency": "USD", "effectiveDate": "2025-05-01" }, "featureKey": "models.gemini", + "inputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 200000 + }, + "price": 125 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 200000, + "max": null + }, + "price": 250 + } + ], + "tierDeterminedBy": 
"totalInputTokens" + }, + "inputTokensCacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 31 + }, + { + "tier": 1, + "price": 62.5 + } + ] + } + } + }, + "outputTokensTieredConfig": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { + "min": 0, + "max": 200000 + }, + "price": 1000 + }, + { + "tier": 1, + "name": "extended", + "threshold": { + "min": 200000, + "max": null + }, + "price": 1500 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -865,7 +1779,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -904,7 +1818,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -943,7 +1857,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -982,7 +1896,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -1021,7 +1935,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -1060,7 +1974,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -1099,7 +2013,7 @@ }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-07-13" + "effectiveDate": "2025-09-30" }, "featureKey": "models.ollama", "supportedFeatures": { @@ -1175,14 +2089,28 @@ "maxOutputTokens": 32768, "token_pricing": { "input": 200, - "output": 800, - "cache_read": 50 + "output": 800 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-04-14" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.gpt4", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 50 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -1215,14 +2143,28 @@ "maxOutputTokens": 32768, "token_pricing": { "input": 40, - "output": 160, - "cache_read": 10 + "output": 160 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-04-14" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.gpt4", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 10 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -1255,14 +2197,28 @@ "maxOutputTokens": 32768, "token_pricing": { "input": 10, - "output": 40, - "cache_read": 2.5 + "output": 40 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-04-14" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.gpt4", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + 
"tiers": [ + { + "tier": 0, + "price": 2.5 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -1294,15 +2250,61 @@ "contextWindow": 128000, "maxOutputTokens": 16384, "token_pricing": { - "input": 250, - "output": 1000, - "cache_read": 125 + "input": 500, + "output": 2000 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2024-05-13" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.gpt4", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.5, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 250 + } + ] + } + } + }, + "inputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "audio": { + "multiplier": 8, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 4000 + } + ] + } + } + }, + "outputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "audio": { + "multiplier": 4, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 8000 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -1334,15 +2336,61 @@ "contextWindow": 128000, "maxOutputTokens": 16384, "token_pricing": { - "input": 15, - "output": 60, - "cache_read": 7.5 + "input": 60, + "output": 240 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2024-07-18" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.gpt4", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.5, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 30 + } + ] + } + } + }, + "inputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "audio": { + "multiplier": 16.67, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 1000 + } + ] + } + } + }, + "outputTokensContentTypes": { + "text": { + "multiplier": 1 + }, + "audio": { + "multiplier": 8.33, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 2000 + } + ] + } + } + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -1369,6 +2417,183 @@ "trainingCutoff": "2023-10-01", "releaseDate": "2024-07-18" }, + "gpt-5-2025-08-07": { + "displayName": "GPT-5", + "contextWindow": 400000, + "maxOutputTokens": 128000, + "token_pricing": { + "input": 125, + "output": 1000, + "thought": 1000 + }, + "pricing_metadata": { + "currency": "USD", + "effectiveDate": "2025-08-13" + }, + "featureKey": "models.openai.gpt5", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 12.5 + } + ] + } + } + }, + "thoughtTokensConfig": { + "basePrice": 1000, + "description": "Tokens used for extended thinking operations" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 32768, + "extendedThinking": false + }, + "constraints": { + "temperature": { + "min": 0, + "max": 2 + } + }, + "systemPromptBehavior": "optional", + "responseSpeed": "medium", + "cost": "very-high", + "intelligence": "very-high", + "trainingCutoff": "2024-09-30", + "releaseDate": "2025-08-07" + }, + "gpt-5-mini-2025-08-07": { + "displayName": "GPT-5 Mini", + "contextWindow": 400000, + "maxOutputTokens": 128000, + "token_pricing": { + "input": 25, + "output": 200, + "thought": 200 + 
}, + "pricing_metadata": { + "currency": "USD", + "effectiveDate": "2025-08-13" + }, + "featureKey": "models.openai.gpt5", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 2.5 + } + ] + } + } + }, + "thoughtTokensConfig": { + "basePrice": 200, + "description": "Tokens used for extended thinking operations" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 32768, + "extendedThinking": false + }, + "constraints": { + "temperature": { + "min": 0, + "max": 2 + } + }, + "systemPromptBehavior": "optional", + "responseSpeed": "fast", + "cost": "medium", + "intelligence": "high", + "trainingCutoff": "2024-05-31", + "releaseDate": "2025-08-07" + }, + "gpt-5-nano-2025-08-07": { + "displayName": "GPT-5 Nano", + "contextWindow": 400000, + "maxOutputTokens": 128000, + "token_pricing": { + "input": 5, + "output": 40, + "thought": 40 + }, + "pricing_metadata": { + "currency": "USD", + "effectiveDate": "2025-08-13" + }, + "featureKey": "models.openai.gpt5", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 0.5 + } + ] + } + } + }, + "thoughtTokensConfig": { + "basePrice": 40, + "description": "Tokens used for extended thinking operations" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 32768, + "extendedThinking": false + }, + "constraints": { + "temperature": { + "min": 0, + "max": 2 + } + }, + "systemPromptBehavior": "optional", + "responseSpeed": "very fast", + "cost": "low", + "intelligence": "medium", + "trainingCutoff": "2024-05-31", + "releaseDate": "2025-08-07" + }, "o3": { "displayName": "o3", "contextWindow": 200000, @@ -1376,13 +2601,32 @@ "token_pricing": { "input": 1000, "output": 4000, - "cache_read": 250 + "thought": 4000 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-04-16" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.o3", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 250 + } + ] + } + } + }, + "thoughtTokensConfig": { + "basePrice": 4000, + "description": "Tokens used for reasoning/thinking operations" + }, "supportedFeatures": { "functionCalling": true, "json": true, @@ -1414,15 +2658,34 @@ "contextWindow": 200000, "maxOutputTokens": 100000, "token_pricing": { - "input": 110, - "output": 440, - "cache_read": 27.5 + "input": 110.00000000000001, + "output": 440.00000000000006, + "thought": 440.00000000000006 }, "pricing_metadata": { "currency": "USD", - "effectiveDate": "2025-04-16" + "effectiveDate": "2025-08-13" }, "featureKey": "models.openai.o4", + "inputTokensCacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { + "tier": 0, + "price": 27.500000000000004 + } + ] + } + } + }, + "thoughtTokensConfig": { + "basePrice": 440.00000000000006, + 
"description": "Tokens used for reasoning/thinking operations" + }, "supportedFeatures": { "functionCalling": true, "json": true, diff --git a/api/src/data/model_sources/anthropic_models.json b/api/src/data/model_sources/anthropic_models.json index fe872b45..53b97870 100644 --- a/api/src/data/model_sources/anthropic_models.json +++ b/api/src/data/model_sources/anthropic_models.json @@ -1,25 +1,204 @@ { - "lastUpdated": "2025-06-04T04:30:00.000Z", - "source": "Official Anthropic documentation research", + "lastUpdated": "2025-08-13T01:54:00.000Z", + "source": "Official Anthropic documentation research - Added Claude Sonnet 4 with tiered pricing and updated cache structure", "pricingUnit": "per_1M_tokens", - "notes": "All pricing verified from official docs as of June 2025. Extended thinking tokens charge for full thinking, not summarized output. Image tokens calculated as (width × height)/750.", + "notes": "All pricing verified from official Anthropic docs as of August 2025. Added Claude Sonnet 4 with tiered pricing based on context usage (≤200K vs >200K tokens). Updated cache pricing structure with 5-minute vs 1-hour TTL. Image tokens calculated as (width × height)/750. Claude Sonnet 4 supports 1M context window in beta.", "models": [ + { + "modelId": "claude-sonnet-4-5-20250929", + "displayName": "Claude Sonnet 4.5", + "family": "Claude-4", + "contextWindow": 1000000, + "maxOutputTokens": 128000, + "pricing": { + "inputTokens": { + "basePrice": 3.00, + "tieredPricing": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 200000 }, + "price": 3.00 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 200000, "max": null }, + "price": 6.00 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": true, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 }, + { "tier": 1, "price": 0.60 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": true, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 3.75 }, + { "tier": 1, "price": 7.50 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": true, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 6.00 }, + { "tier": 1, "price": 12.00 } + ] + } + } + } + }, + "outputTokens": { + "basePrice": 15.00, + "tieredPricing": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 200000 }, + "price": 15.00 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 200000, "max": null }, + "price": 22.50 + } + ], + "tierDeterminedBy": "totalInputTokens" + } + }, + "currency": "USD", + "effectiveDate": "2025-05-14" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 16384, + "extendedThinking": false + }, + "constraints": { + "temperature": { "min": 0.0, "max": 1.0 } + }, + "systemPromptBehavior": "optional", + "trainingCutoff": "2025-03-01", + "releaseDate": "2025-05-14", + "responseSpeed": "fast", + "cost": "medium", + "intelligence": "high", + "modality": "text-and-vision", + "description": "High-performance model with exceptional reasoning capabilities and tiered pricing for large contexts", + "deprecated": false + }, { 
"modelId": "claude-sonnet-4-20250514", - "displayName": "Claude Sonnet 4.0", + "displayName": "Claude Sonnet 4", "family": "Claude-4", - "contextWindow": 200000, - "maxOutputTokens": 64000, + "contextWindow": 1000000, + "maxOutputTokens": 128000, "pricing": { "inputTokens": { "basePrice": 3.00, - "cachedPrice": 0.3 + "tieredPricing": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 200000 }, + "price": 3.00 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 200000, "max": null }, + "price": 6.00 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": true, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 }, + { "tier": 1, "price": 0.60 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": true, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 3.75 }, + { "tier": 1, "price": 7.50 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": true, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 6.00 }, + { "tier": 1, "price": 12.00 } + ] + } + } + } }, "outputTokens": { - "basePrice": 15.00 + "basePrice": 15.00, + "tieredPricing": { + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 200000 }, + "price": 15.00 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 200000, "max": null }, + "price": 22.50 + } + ], + "tierDeterminedBy": "totalInputTokens" + } }, "currency": "USD", - "effectiveDate": "2025-05-23" + "effectiveDate": "2025-05-14" }, "supportedFeatures": { "functionCalling": true, @@ -39,29 +218,62 @@ }, "systemPromptBehavior": "optional", "trainingCutoff": "2025-03-01", - "releaseDate": "2025-05-23", - "responseSpeed": "medium", + "releaseDate": "2025-05-14", + "responseSpeed": "fast", "cost": "medium", "intelligence": "high", "modality": "text-and-vision", - "description": "Anthropic's newest flagship model with advanced reasoning" + "description": "High-performance model with exceptional reasoning capabilities and tiered pricing for large contexts", + "deprecated": false }, { - "modelId": "claude-opus-4-20250514", - "displayName": "Claude Opus 4.0", + "modelId": "claude-opus-4-1-20250805", + "alias": "claude-opus-4-1", + "displayName": "Claude Opus 4.1", "family": "Claude-4", "contextWindow": 200000, - "maxOutputTokens": 128000, + "maxOutputTokens": 32000, "pricing": { "inputTokens": { "basePrice": 15.00, - "cachedPrice": 1.50 + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 1.50 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 18.75 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 30.00 } + ] + } + } + } }, "outputTokens": { "basePrice": 75.00 }, "currency": "USD", - "effectiveDate": "2025-05-23" + "effectiveDate": "2025-08-05" }, "supportedFeatures": { "functionCalling": true, @@ -81,12 +293,86 @@ }, "systemPromptBehavior": "optional", "trainingCutoff": "2025-03-01", - "releaseDate": 
"2025-05-23", + "releaseDate": "2025-08-05", "responseSpeed": "slow", "cost": "very-high", "intelligence": "very-high", "modality": "text-and-vision", - "description": "Anthropic's most capable model for complex reasoning tasks" + "description": "Our most capable and intelligent model yet. Sets new standards in complex reasoning and advanced coding.", + "deprecated": false + }, + { + "modelId": "claude-opus-4-20250514", + "displayName": "Claude Opus 4.0", + "family": "Claude-4", + "contextWindow": 200000, + "maxOutputTokens": 32000, + "pricing": { + "inputTokens": { + "basePrice": 15.00, + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 1.50 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 18.75 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 30.00 } + ] + } + } + } + }, + "outputTokens": { + "basePrice": 75.00 + }, + "currency": "USD", + "effectiveDate": "2025-05-14" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 16384, + "extendedThinking": false + }, + "constraints": { + "temperature": { "min": 0.0, "max": 1.0 } + }, + "systemPromptBehavior": "optional", + "trainingCutoff": "2025-03-01", + "releaseDate": "2025-05-23", + "responseSpeed": "medium", + "cost": "medium", + "intelligence": "high", + "modality": "text-and-vision", + "description": "Anthropic's newest flagship model with advanced reasoning" }, { "modelId": "claude-3-7-sonnet-20250219", @@ -97,7 +383,38 @@ "pricing": { "inputTokens": { "basePrice": 3.00, - "cachedPrice": 0.3 + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 3.75 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 6.00 } + ] + } + } + } }, "outputTokens": { "basePrice": 15.00 @@ -135,11 +452,42 @@ "displayName": "Claude Sonnet 3.5", "family": "Claude-3", "contextWindow": 200000, - "maxOutputTokens": 128000, + "maxOutputTokens": 8192, "pricing": { "inputTokens": { "basePrice": 3.00, - "cachedPrice": 0.30 + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 3.75 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 6.00 } + ] + } + } + } }, "outputTokens": 
{ "basePrice": 15.00 @@ -181,7 +529,38 @@ "pricing": { "inputTokens": { "basePrice": 0.80, - "cachedPrice": 0.08 + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.08 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 1.00 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 1.60 } + ] + } + } + } }, "outputTokens": { "basePrice": 4.00 @@ -223,7 +602,38 @@ "pricing": { "inputTokens": { "basePrice": 15.00, - "cachedPrice": 1.50 + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 1.50 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 18.75 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 30.00 } + ] + } + } + } }, "outputTokens": { "basePrice": 75.00 @@ -265,7 +675,38 @@ "pricing": { "inputTokens": { "basePrice": 0.25, - "cachedPrice": 0.03 + "cacheTypes": { + "read": { + "description": "Cache read operations", + "inheritsTiers": false, + "multiplier": 0.12, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.03 } + ] + } + }, + "write_time1": { + "description": "Short-term cache write (5min TTL)", + "inheritsTiers": false, + "multiplier": 1.2, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 } + ] + } + }, + "write_time2": { + "description": "Long-term cache write (60min TTL)", + "inheritsTiers": false, + "multiplier": 2.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.50 } + ] + } + } + } }, "outputTokens": { "basePrice": 1.25 diff --git a/api/src/data/model_sources/google_models.json b/api/src/data/model_sources/google_models.json index 5366a498..9e68c4e1 100644 --- a/api/src/data/model_sources/google_models.json +++ b/api/src/data/model_sources/google_models.json @@ -1,8 +1,8 @@ { - "lastUpdated": "2025-06-04T04:30:00.000Z", - "source": "Official Google Gemini API documentation research", + "lastUpdated": "2025-08-13T01:58:00.000Z", + "source": "Official Google Gemini API documentation research - Updated with new tiered pricing format", "pricingUnit": "per_1M_tokens", - "notes": "Pricing varies by context length (≤200k vs >200k tokens). 2.5 series models are in Preview with restricted rate limits. Free tier available for most models. Thinking models charge separately for reasoning tokens.", + "notes": "All pricing verified from official Google docs as of August 2025. Updated tiered pricing format for context-based and content-based pricing. Thinking models charge reasoning tokens as part of output tokens. 
Content types include audio, image, video with different rates.", "models": [ { "modelId": "gemini-2.5-pro-preview-06-05", @@ -12,20 +12,56 @@ "maxOutputTokens": 65536, "pricing": { "inputTokens": { - "basePrice": 2.5, - "cachedPrice": 0.625, + "basePrice": 2.50, "tieredPricing": { - "under200k": 1.25, - "over200k": 2.5, - "cacheUnder200k": 0.31, - "cachedOver200k": 0.625 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 200000 }, + "price": 1.25 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 200000, "max": null }, + "price": 2.50 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.31 }, + { "tier": 1, "price": 0.625 } + ] + } + } } }, "outputTokens": { - "basePrice": 15.0, + "basePrice": 15.00, "tieredPricing": { - "under200k": 10.0, - "over200k": 15.0 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 200000 }, + "price": 10.00 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 200000, "max": null }, + "price": 15.00 + } + ], + "tierDeterminedBy": "totalInputTokens" } }, "currency": "USD", @@ -66,22 +102,38 @@ "maxOutputTokens": 65536, "pricing": { "inputTokens": { - "basePrice": 0.15, - "cachedPrice": 0.0375, - "multimodal": { - "text": 0.15, - "audio": 1.0 + "basePrice": 0.30, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.075 } + ] + } + } + }, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "image": { "multiplier": 1.0 }, + "video": { "multiplier": 1.0 }, + "audio": { + "multiplier": 3.33, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 1.00 } + ] + } + } } }, "outputTokens": { - "basePrice": 3.5, - "tieredPricing": { - "nonThinking": 0.6, - "thinking": 3.5 - } + "basePrice": 2.50 }, "currency": "USD", - "effectiveDate": "2025-05-01" + "effectiveDate": "2025-08-11" }, "supportedFeatures": { "functionCalling": true, @@ -118,20 +170,38 @@ "maxOutputTokens": 8192, "pricing": { "inputTokens": { - "basePrice": 0.1, - "cachedPrice": 0.025, - "multimodal": { - "text": 0.1, - "audio": 0.7, - "cachedTextPrice": 0.025, - "cachedAudioPrice": 0.175 + "basePrice": 0.10, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.025 } + ] + } + } + }, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "image": { "multiplier": 1.0 }, + "video": { "multiplier": 1.0 }, + "audio": { + "multiplier": 7.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.70 } + ] + } + } } }, "outputTokens": { - "basePrice": 0.4 + "basePrice": 0.40 }, "currency": "USD", - "effectiveDate": "2025-02-01" + "effectiveDate": "2025-08-11" }, "supportedFeatures": { "functionCalling": true, @@ -167,13 +237,38 @@ "maxOutputTokens": 8192, "pricing": { "inputTokens": { - "basePrice": 0.075 + "basePrice": 0.10, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.025 } + ] + } + } + }, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "image": { "multiplier": 1.0 }, + "video": { "multiplier": 1.0 }, + 
"audio": { + "multiplier": 3.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 } + ] + } + } + } }, "outputTokens": { - "basePrice": 0.3 + "basePrice": 0.40 }, "currency": "USD", - "effectiveDate": "2025-02-01" + "effectiveDate": "2025-08-11" }, "supportedFeatures": { "functionCalling": true, @@ -209,24 +304,60 @@ "maxOutputTokens": 8192, "pricing": { "inputTokens": { - "basePrice": 2.5, - "cachedPrice": 0.625, + "basePrice": 2.50, "tieredPricing": { - "under128k": 1.25, - "over128k": 2.5, - "cacheUnder128k": 0.3125, - "cachedOver128k": 0.625 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 128000 }, + "price": 1.25 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 128000, "max": null }, + "price": 2.50 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.3125 }, + { "tier": 1, "price": 0.625 } + ] + } + } } }, "outputTokens": { - "basePrice": 10.0, + "basePrice": 10.00, "tieredPricing": { - "under128k": 5.0, - "over128k": 10.0 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 128000 }, + "price": 5.00 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 128000, "max": null }, + "price": 10.00 + } + ], + "tierDeterminedBy": "totalInputTokens" } }, "currency": "USD", - "effectiveDate": "2024-09-01" + "effectiveDate": "2025-08-11" }, "supportedFeatures": { "functionCalling": true, @@ -263,23 +394,59 @@ "pricing": { "inputTokens": { "basePrice": 0.15, - "cachedPrice": 0.0375, "tieredPricing": { - "under128k": 0.075, - "over128k": 0.15, - "cacheUnder128k": 0.01875, - "cachedOver128k": 0.0375 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 128000 }, + "price": 0.075 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 128000, "max": null }, + "price": 0.15 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.01875 }, + { "tier": 1, "price": 0.0375 } + ] + } + } } }, "outputTokens": { - "basePrice": 0.6, + "basePrice": 0.60, "tieredPricing": { - "under128k": 0.3, - "over128k": 0.6 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 128000 }, + "price": 0.30 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 128000, "max": null }, + "price": 0.60 + } + ], + "tierDeterminedBy": "totalInputTokens" } }, "currency": "USD", - "effectiveDate": "2024-09-01" + "effectiveDate": "2025-08-11" }, "supportedFeatures": { "functionCalling": true, @@ -315,24 +482,60 @@ "maxOutputTokens": 8192, "pricing": { "inputTokens": { - "basePrice": 0.0375, - "cachedPrice": 0.02, + "basePrice": 0.075, "tieredPricing": { - "under128k": 0.0375, - "over128k": 0.075, - "cacheUnder128k": 0.01, - "cachedOver128k": 0.02 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 128000 }, + "price": 0.0375 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 128000, "max": null }, + "price": 0.075 + } + ], + "tierDeterminedBy": "totalInputTokens" + }, + "cacheTypes": { + "read": { + "description": "Context cache read operations", + "inheritsTiers": true, + "multiplier": 0.27, + "explicitPricing": { + "tiers": [ 
+ { "tier": 0, "price": 0.01 }, + { "tier": 1, "price": 0.02 } + ] + } + } } }, "outputTokens": { - "basePrice": 0.15, + "basePrice": 0.30, "tieredPricing": { - "under128k": 0.15, - "over128k": 0.3 + "tiers": [ + { + "tier": 0, + "name": "base", + "threshold": { "min": 0, "max": 128000 }, + "price": 0.15 + }, + { + "tier": 1, + "name": "extended", + "threshold": { "min": 128000, "max": null }, + "price": 0.30 + } + ], + "tierDeterminedBy": "totalInputTokens" } }, "currency": "USD", - "effectiveDate": "2024-10-01" + "effectiveDate": "2025-08-11" }, "supportedFeatures": { "functionCalling": true, diff --git a/api/src/data/model_sources/openai_models.json b/api/src/data/model_sources/openai_models.json index 09f1cb2a..9e2cef3a 100644 --- a/api/src/data/model_sources/openai_models.json +++ b/api/src/data/model_sources/openai_models.json @@ -1,9 +1,186 @@ { - "lastUpdated": "2025-06-04T04:30:00.000Z", - "source": "Official OpenAI documentation research", + "lastUpdated": "2025-08-13T03:49:00.000Z", + "source": "Official OpenAI documentation research - Updated pricing and cache structure for all models", "pricingUnit": "per_1M_tokens", - "notes": "All pricing verified from official OpenAI docs as of June 2025. Reasoning models charge for reasoning tokens. Batch API offers 50% savings. Cache pricing applies to prompt caching feature.", + "notes": "All pricing verified from official OpenAI docs as of August 2025. GPT-5, GPT-4.1, o3, and o4-mini models now available. Updated cache pricing structure to new format. Batch API offers 50% savings. Realtime API has separate pricing for audio vs text tokens.", "models": [ + { + "modelId": "gpt-5-2025-08-07", + "alias": "gpt-5", + "displayName": "GPT-5", + "family": "GPT-5", + "contextWindow": 400000, + "maxOutputTokens": 128000, + "pricing": { + "inputTokens": { + "basePrice": 1.25, + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.125 } + ] + } + } + } + }, + "outputTokens": { + "basePrice": 10.00 + }, + "thoughtTokens": { + "basePrice": 10.00, + "description": "Tokens used for extended thinking operations" + }, + "currency": "USD", + "effectiveDate": "2025-08-13" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 32768, + "extendedThinking": false + }, + "constraints": { + "temperature": { "min": 0.0, "max": 2.0 } + }, + "systemPromptBehavior": "optional", + "trainingCutoff": "2024-09-30", + "releaseDate": "2025-08-07", + "responseSpeed": "medium", + "cost": "very-high", + "intelligence": "very-high", + "modality": "text-and-vision", + "description": "Flagship model for coding, reasoning, and agentic tasks across domains", + "deprecated": false + }, + { + "modelId": "gpt-5-mini-2025-08-07", + "alias": "gpt-5-mini", + "displayName": "GPT-5 Mini", + "family": "GPT-5", + "contextWindow": 400000, + "maxOutputTokens": 128000, + "pricing": { + "inputTokens": { + "basePrice": 0.25, + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.025 } + ] + } + } + } + }, + "outputTokens": { + "basePrice": 2.00 + }, + "thoughtTokens": { + "basePrice": 2.00, + "description": "Tokens used for extended thinking operations" + }, + 
"currency": "USD", + "effectiveDate": "2025-08-13" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 32768, + "extendedThinking": false + }, + "constraints": { + "temperature": { "min": 0.0, "max": 2.0 } + }, + "systemPromptBehavior": "optional", + "trainingCutoff": "2024-05-31", + "releaseDate": "2025-08-07", + "responseSpeed": "fast", + "cost": "medium", + "intelligence": "high", + "modality": "text-and-vision", + "description": "Faster, more cost-efficient version of GPT-5 for well-defined tasks", + "deprecated": false + }, + { + "modelId": "gpt-5-nano-2025-08-07", + "alias": "gpt-5-nano", + "displayName": "GPT-5 Nano", + "family": "GPT-5", + "contextWindow": 400000, + "maxOutputTokens": 128000, + "pricing": { + "inputTokens": { + "basePrice": 0.05, + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.1, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.005 } + ] + } + } + } + }, + "outputTokens": { + "basePrice": 0.40 + }, + "thoughtTokens": { + "basePrice": 0.40, + "description": "Tokens used for extended thinking operations" + }, + "currency": "USD", + "effectiveDate": "2025-08-13" + }, + "supportedFeatures": { + "functionCalling": true, + "json": true, + "streaming": true, + "vision": true, + "promptCaching": true, + "extendedThinking": true + }, + "defaults": { + "temperature": 0.7, + "maxTokens": 32768, + "extendedThinking": false + }, + "constraints": { + "temperature": { "min": 0.0, "max": 2.0 } + }, + "systemPromptBehavior": "optional", + "trainingCutoff": "2024-05-31", + "releaseDate": "2025-08-07", + "responseSpeed": "very fast", + "cost": "low", + "intelligence": "medium", + "modality": "text-and-vision", + "description": "Fastest, cheapest version of GPT-5 for summarization and classification tasks", + "deprecated": false + }, { "modelId": "gpt-4.1", "displayName": "GPT-4.1", @@ -13,13 +190,24 @@ "pricing": { "inputTokens": { "basePrice": 2.00, - "cachedPrice": 0.50 + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.50 } + ] + } + } + } }, "outputTokens": { "basePrice": 8.00 }, "currency": "USD", - "effectiveDate": "2025-04-14" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, @@ -55,13 +243,24 @@ "pricing": { "inputTokens": { "basePrice": 0.40, - "cachedPrice": 0.10 + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.10 } + ] + } + } + } }, "outputTokens": { "basePrice": 1.60 }, "currency": "USD", - "effectiveDate": "2025-04-14" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, @@ -97,13 +296,24 @@ "pricing": { "inputTokens": { "basePrice": 0.10, - "cachedPrice": 0.025 + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.025 } + ] + } + } + } }, "outputTokens": { "basePrice": 0.40 }, "currency": "USD", - "effectiveDate": "2025-04-14" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, @@ -138,14 +348,47 @@ "maxOutputTokens": 
16384, "pricing": { "inputTokens": { - "basePrice": 2.50, - "cachedPrice": 1.25 + "basePrice": 5.00, + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.5, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 2.50 } + ] + } + } + }, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "audio": { + "multiplier": 8.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 40.00 } + ] + } + } + } }, "outputTokens": { - "basePrice": 10.00 + "basePrice": 20.00, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "audio": { + "multiplier": 4.0, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 80.00 } + ] + } + } + } }, "currency": "USD", - "effectiveDate": "2024-05-13" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, @@ -180,14 +423,47 @@ "maxOutputTokens": 16384, "pricing": { "inputTokens": { - "basePrice": 0.15, - "cachedPrice": 0.075 + "basePrice": 0.60, + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.5, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.30 } + ] + } + } + }, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "audio": { + "multiplier": 16.67, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 10.00 } + ] + } + } + } }, "outputTokens": { - "basePrice": 0.60 + "basePrice": 2.40, + "contentTypes": { + "text": { "multiplier": 1.0 }, + "audio": { + "multiplier": 8.33, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 20.00 } + ] + } + } + } }, "currency": "USD", - "effectiveDate": "2024-07-18" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, @@ -223,13 +499,28 @@ "pricing": { "inputTokens": { "basePrice": 10.00, - "cachedPrice": 2.50 + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 2.50 } + ] + } + } + } }, "outputTokens": { "basePrice": 40.00 }, + "thoughtTokens": { + "basePrice": 40.00, + "description": "Tokens used for reasoning/thinking operations" + }, "currency": "USD", - "effectiveDate": "2025-04-16" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, @@ -265,13 +556,28 @@ "pricing": { "inputTokens": { "basePrice": 1.10, - "cachedPrice": 0.275 + "cacheTypes": { + "read": { + "description": "Prompt cache read operations", + "inheritsTiers": false, + "multiplier": 0.25, + "explicitPricing": { + "tiers": [ + { "tier": 0, "price": 0.275 } + ] + } + } + } }, "outputTokens": { "basePrice": 4.40 }, + "thoughtTokens": { + "basePrice": 4.40, + "description": "Tokens used for reasoning/thinking operations" + }, "currency": "USD", - "effectiveDate": "2025-04-16" + "effectiveDate": "2025-08-13" }, "supportedFeatures": { "functionCalling": true, diff --git a/api/src/dataSources/base/baseDataSourceProvider.ts b/api/src/dataSources/base/baseDataSourceProvider.ts index a57f5c9d..926b3e2a 100644 --- a/api/src/dataSources/base/baseDataSourceProvider.ts +++ b/api/src/dataSources/base/baseDataSourceProvider.ts @@ -21,6 +21,7 @@ import type { DataSourceProviderType, DataSourceSearchCapability, } from 'shared/types/dataSource.ts'; +import type { InstructionFilters } from 'api/types/instructionFilters.ts'; /** * Abstract base class for all data source providers @@ -192,10 +193,81 @@ export abstract class BaseDataSourceProvider implements DataSourceProvider { 
return this.capabilities.includes(capability); } + /** + * Get detailed editing instructions for LLM tool usage + * Must be implemented by subclasses to provide comprehensive editing guidance + * @param filters Optional filters to customize instruction content + * @returns Detailed instruction text with provider-specific examples and workflows + */ + abstract getDetailedInstructions(filters?: InstructionFilters): string; + /** * Get content type guidance for LLM tool usage * Must be implemented by subclasses to provide provider-specific guidance * @returns ContentTypeGuidance object with usage examples and constraints */ abstract getContentTypeGuidance(): ContentTypeGuidance; + + /** + * Get error-specific guidance for LLM tool usage + * Provides context-aware error messaging based on error type and operation + * @param errorType The type of error encountered + * @param operation The operation that failed + * @param hasLoadedInstructions Whether the LLM has already loaded instructions + * @returns Object with enhanced error message and guidance type + */ + getErrorGuidance( + errorType: + | 'not_found' + | 'permission_denied' + | 'invalid_format' + | 'workflow_violation' + | 'configuration' + | 'unknown', + operation: string, + hasLoadedInstructions: boolean = false, + ): { message: string; type: 'workflow' | 'instructions' | 'configuration' | 'format' } { + // Default implementation - can be overridden by subclasses + const instructionsReminder = hasLoadedInstructions + ? "💡 Review the datasource instructions you've loaded, especially the workflow sections marked as CRITICAL." + : "🔍 **Load datasource instructions first**: Use `loadDatasource` with `returnType='instructions'` to get detailed workflows and requirements for this data source."; + + switch (errorType) { + case 'not_found': + return { + message: + `${instructionsReminder}\n\n📋 **Resource not found**: Verify the resource path exists and you have access. Consider loading the resource first to confirm its availability.`, + type: 'workflow', + }; + + case 'workflow_violation': + case 'invalid_format': + return { + message: + `${instructionsReminder}\n\n⚠️ **Workflow error**: This operation may require following specific steps. Check the datasource instructions for required workflows and parameter formats.`, + type: 'workflow', + }; + + case 'configuration': + return { + message: + `${instructionsReminder}\n\n⚙️ **Configuration issue**: This error suggests missing or incorrect configuration. Review the datasource setup requirements and parameter formats.`, + type: 'configuration', + }; + + case 'permission_denied': + return { + message: + `${instructionsReminder}\n\n🔐 **Access denied**: Verify you have the required permissions for this ${operation} operation on ${this.name} resources.`, + type: 'configuration', + }; + + default: + return { + message: + `${instructionsReminder}\n\n🔧 **Error in ${operation} operation**: This ${this.name} operation failed. 
The datasource instructions contain critical workflows and requirements that may resolve this issue.`, + type: 'instructions', + }; + } + } } diff --git a/api/src/dataSources/base/bbResourceAccessor.ts b/api/src/dataSources/base/bbResourceAccessor.ts index 1a785e2c..390ffbfb 100644 --- a/api/src/dataSources/base/bbResourceAccessor.ts +++ b/api/src/dataSources/base/bbResourceAccessor.ts @@ -24,7 +24,16 @@ import type { ResourceWriteOptions, ResourceWriteResult, } from 'shared/types/dataSourceResource.ts'; +import type { ResourceSuggestionsOptions, ResourceSuggestionsResponse } from '../../utils/resourceSuggestions.utils.ts'; import type { PortableTextBlock } from 'api/types/portableText.ts'; +import type { TabularSheet } from 'api/types/tabular.ts'; +import type { + EnhancedURIParseResult, + URIConstructionOptions, + URIContext, + ValidationMode, + ValidationResult, +} from 'shared/types/resourceValidation.ts'; /** * Abstract base class for BB-managed resource accessors @@ -100,7 +109,7 @@ export abstract class BBResourceAccessor extends BaseResourceAccessor { */ writeResource?( resourceUri: string, - content: string | Uint8Array | PortableTextBlock[], + content: string | Uint8Array | Array | Array, options?: ResourceWriteOptions, ): Promise; @@ -141,6 +150,45 @@ export abstract class BBResourceAccessor extends BaseResourceAccessor { */ deleteResource?(resourceUri: string, options?: ResourceDeleteOptions): Promise; + /** + * Build a resource URI for this datasource with context awareness + * Default implementation delegates to provider if available + * @param partialPath Partial path or resource identifier + * @param options Construction options with context + * @returns Properly formatted URI for this datasource + */ + buildResourceUri?(partialPath: string, options: URIConstructionOptions): Promise; + + /** + * Parse and validate a resource URI with enhanced context + * Default implementation provides basic validation + * @param resourceUri URI to parse and validate + * @param mode Validation mode to use + * @returns Enhanced parsing result with validation context + */ + parseResourceUri?(resourceUri: string, mode?: ValidationMode): Promise; + + /** + * Validate a resource URI with detailed feedback + * Default implementation provides basic validation + * @param resourceUri URI to validate + * @param mode Validation mode to use + * @returns Detailed validation result + */ + validateResourceUri?(resourceUri: string, mode?: ValidationMode): Promise; + + /** + * Suggest resources for autocomplete based on partial path + * Optional capability - implement in subclasses if supported + * @param partialPath Partial path input from user + * @param options Suggestion options (limit, filters, etc.) 
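The default `getErrorGuidance` above is meant to be overridden per provider. The sketch below shows the intended shape as a standalone function rather than a real subclass; the provider behaviour and wording are invented for illustration and do not appear in this patch.

```ts
// Hypothetical per-provider specialisation of the error-guidance hook above.
// Shown as a free function for brevity; in the codebase it would override the
// method on a BaseDataSourceProvider subclass.

type ErrorGuidanceType = 'workflow' | 'instructions' | 'configuration' | 'format';
type ErrorGuidance = { message: string; type: ErrorGuidanceType };

type GuidanceErrorType =
  | 'not_found'
  | 'permission_denied'
  | 'invalid_format'
  | 'workflow_violation'
  | 'configuration'
  | 'unknown';

// Example: a structured-content datasource might steer format errors toward
// its block format instead of the generic workflow message.
function exampleStructuredContentGuidance(
  errorType: GuidanceErrorType,
  operation: string,
  hasLoadedInstructions = false,
): ErrorGuidance {
  const reminder = hasLoadedInstructions
    ? 'Review the datasource instructions you have already loaded.'
    : "Load the datasource instructions first (loadDatasource with returnType='instructions').";

  if (errorType === 'invalid_format') {
    return {
      message:
        `${reminder}\n\nThis datasource expects structured block content for ${operation}; plain text bodies are rejected.`,
      type: 'format',
    };
  }
  // Everything else would normally fall through to the base-class default shown above.
  return { message: `${reminder}\n\n${operation} failed; see the datasource instructions.`, type: 'instructions' };
}

console.log(exampleStructuredContentGuidance('invalid_format', 'writeResource').type); // "format"
```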
+ * @returns Resource suggestions for autocomplete + */ + suggestResourcesForPath?( + partialPath: string, + options: ResourceSuggestionsOptions, + ): Promise; + /** * Check if this accessor has a specific capability * @param capability The capability to check for diff --git a/api/src/dataSources/base/mcpResourceAccessor.ts b/api/src/dataSources/base/mcpResourceAccessor.ts index aaa2004e..b9da2d87 100644 --- a/api/src/dataSources/base/mcpResourceAccessor.ts +++ b/api/src/dataSources/base/mcpResourceAccessor.ts @@ -21,6 +21,8 @@ import type { } from 'shared/types/dataSourceResource.ts'; import type { DataSourceCapability } from 'shared/types/dataSource.ts'; import type { MCPManager } from 'api/mcp/mcpManager.ts'; +import type { PortableTextBlock } from 'api/types/portableText.ts'; +import type { TabularSheet } from 'api/types/tabular.ts'; /** * Abstract base class for MCP-managed resource accessors @@ -134,7 +136,7 @@ export abstract class MCPResourceAccessor extends BaseResourceAccessor { */ async writeResource?( resourceUri: string, - _content: string | Uint8Array, + _content: string | Uint8Array | Array | Array, _options?: ResourceWriteOptions, ): Promise { logger.debug(`MCPResourceAccessor: Writing to resource ${resourceUri} on server ${this.serverId}`); diff --git a/api/src/dataSources/dataSourceConnection.ts b/api/src/dataSources/dataSourceConnection.ts index 609d9314..c340462b 100644 --- a/api/src/dataSources/dataSourceConnection.ts +++ b/api/src/dataSources/dataSourceConnection.ts @@ -15,6 +15,8 @@ import type { DataSourceProvider } from 'api/dataSources/interfaces/dataSourcePr import type { AuthConfig } from 'api/dataSources/interfaces/authentication.ts'; import type { DataSourceAccessMethod, DataSourceCapability, DataSourceProviderType } from 'shared/types/dataSource.ts'; import type { ResourceAccessor } from 'api/dataSources/interfaces/resourceAccessor.ts'; +import type { ValidationMode } from 'shared/types/resourceValidation.ts'; +import type { ResourceSuggestionsOptions, ResourceSuggestionsResponse } from '../utils/resourceSuggestions.utils.ts'; import { getDataSourceFactory } from 'api/dataSources/dataSourceFactory.ts'; import { //type DataSourceRegistry, @@ -202,11 +204,13 @@ export class DataSourceConnection implements IDataSourceConnection { /** * Check if resource path is within this data source + * @param resourceUri URI to validate + * @param mode Validation mode to use * @returns boolean */ - async isResourceWithinDataSource(resourceUri: string): Promise { + async isResourceWithinDataSource(resourceUri: string, mode?: ValidationMode): Promise { const accessor = await this.getResourceAccessor(); - return await accessor.isResourceWithinDataSource(resourceUri); + return await accessor.isResourceWithinDataSource(resourceUri, mode); } /** @@ -218,6 +222,23 @@ export class DataSourceConnection implements IDataSourceConnection { return await accessor.resourceExists(resourceUri, options); } + /** + * Suggest resources for autocomplete based on partial path + * @param partialPath Partial path input from user + * @param options Suggestion options (limit, filters, etc.) 
+ * @returns Resource suggestions for autocomplete + */ + async suggestResources( + partialPath: string, + options: ResourceSuggestionsOptions, + ): Promise { + const accessor = await this.getResourceAccessor(); + if (!accessor.suggestResourcesForPath) { + return { suggestions: [], hasMore: false }; // Graceful fallback for unsupported accessors + } + return await accessor.suggestResourcesForPath(partialPath, options); + } + /** * Ensure resource path exists in the data source for this accessor * @param resourceUri The resource URI to check @@ -252,8 +273,18 @@ export class DataSourceConnection implements IDataSourceConnection { update(updates: Partial): void { // Don't allow changing id, providerType, or accessMethod if (updates.name !== undefined) this.name = updates.name; - if (updates.config !== undefined) this.config = { ...updates.config }; - if (updates.auth !== undefined) this.auth = { ...updates.auth }; + if (updates.config !== undefined) { + this.config = { ...updates.config }; + // Clear cached accessor to force recreation with new config + this._resourceAccessor = undefined; + logger.debug(`DataSourceConnection: Cleared cached accessor for ${this.id} due to config update`); + } + if (updates.auth !== undefined) { + this.auth = { ...updates.auth }; + // Clear cached accessor to force recreation with new auth credentials + this._resourceAccessor = undefined; + logger.debug(`DataSourceConnection: Cleared cached accessor for ${this.id} due to auth update`); + } if (updates.enabled !== undefined) this.enabled = updates.enabled; if (updates.isPrimary !== undefined) this.isPrimary = updates.isPrimary; if (updates.priority !== undefined) this.priority = updates.priority; diff --git a/api/src/dataSources/dataSourceRegistry.ts b/api/src/dataSources/dataSourceRegistry.ts index b0991663..57cf5b05 100644 --- a/api/src/dataSources/dataSourceRegistry.ts +++ b/api/src/dataSources/dataSourceRegistry.ts @@ -16,8 +16,6 @@ import type { MCPManager } from 'api/mcp/mcpManager.ts'; import { getMCPManager } from 'api/mcp/mcpManager.ts'; // Dynamic imports - providers will be loaded conditionally // import { FilesystemProvider } from 'api/dataSources/filesystemProvider.ts'; -// import { GoogleDocsProvider } from 'api/dataSources/googledocsProvider.ts'; -// import { NotionProvider } from 'api/dataSources/notionProvider.ts'; import { GenericMCPProvider } from 'api/dataSources/genericMCPProvider.ts'; import { CORE_DATASOURCES, type DataSourceMetadata } from './dataSource_manifest.ts'; import { exists } from '@std/fs'; @@ -330,11 +328,7 @@ export class DataSourceRegistry { */ private async detectProductVariant(): Promise<'opensource' | 'saas'> { try { - // Check if saas-specific providers exist by attempting to import them - const notionExists = await this.checkProviderExists('api/dataSources/notion/notionProvider.ts'); - const googleDocsExists = await this.checkProviderExists('api/dataSources/googledocs/googledocsProvider.ts'); - - return (notionExists || googleDocsExists) ? 
'saas' : 'opensource'; + return 'opensource'; } catch (error) { logger.warn(`DataSourceRegistry: Error detecting product variant: ${(error as Error).message}`); return 'opensource'; // Default to opensource if detection fails diff --git a/api/src/dataSources/filesystem/filesystemAccessor.ts b/api/src/dataSources/filesystem/filesystemAccessor.ts index 6774edd6..89dcfe18 100644 --- a/api/src/dataSources/filesystem/filesystemAccessor.ts +++ b/api/src/dataSources/filesystem/filesystemAccessor.ts @@ -2,6 +2,7 @@ * FilesystemAccessor implementation for accessing filesystem resources. */ import { basename, dirname, join, relative } from '@std/path'; +import { globToRegExp } from '@std/path'; import { ensureDir, exists, @@ -9,6 +10,11 @@ import { walk, } from '@std/fs'; import type { WalkOptions } from '@std/fs'; +import type { + ResourceSuggestion, + ResourceSuggestionsOptions, + ResourceSuggestionsResponse, +} from '../../utils/resourceSuggestions.utils.ts'; import { logger } from 'shared/logger.ts'; import { BBResourceAccessor } from '../base/bbResourceAccessor.ts'; @@ -66,6 +72,8 @@ import type { TextMatch, } from 'shared/types/dataSourceResource.ts'; import type { DataSourceCapability, DataSourceMetadata } from 'shared/types/dataSource.ts'; +import type { PortableTextBlock } from 'api/types/portableText.ts'; +import type { TabularSheet } from 'api/types/tabular.ts'; /** * FilesystemAccessor for accessing filesystem resources @@ -208,6 +216,7 @@ export class FilesystemAccessor extends BBResourceAccessor { const isBinaryMime = resourceMetadata.mimeType.startsWith('image/') || resourceMetadata.mimeType.startsWith('audio/') || resourceMetadata.mimeType.startsWith('video/') || + resourceMetadata.mimeType.startsWith('application/pdf') || resourceMetadata.mimeType.startsWith('application/octet-stream'); // Handle range request if specified @@ -692,7 +701,7 @@ export class FilesystemAccessor extends BBResourceAccessor { */ override async writeResource( resourceUri: string, - content: string | Uint8Array, + content: string | Uint8Array, // | Array | Array, options: ResourceWriteOptions = {}, ): Promise { try { @@ -1775,6 +1784,193 @@ export class FilesystemAccessor extends BBResourceAccessor { return false; } + /** + * Suggest resources for autocomplete based on partial path + * @param partialPath Partial path input from user + * @param options Suggestion options (limit, filters, etc.) 
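The new autocomplete path routes through `DataSourceConnection.suggestResources`, which falls back to an empty result when the accessor does not implement `suggestResourcesForPath`. A small caller sketch follows; the option and response fields mirror what the accessor code above reads (limit, caseSensitive, type, followSymlinks; suggestions plus hasMore), but treat the exact shapes as approximations of the real `ResourceSuggestionsOptions` / `ResourceSuggestionsResponse` types.

```ts
// Illustrative caller for connection-level resource suggestions.

interface SuggestionOptionsSketch {
  limit?: number;
  caseSensitive?: boolean;
  type?: 'all' | 'file' | 'directory';
  followSymlinks?: boolean;
}

interface SuggestionSketch {
  path: string;
  isDirectory: boolean;
  dataSourceName?: string;
}

interface SuggestionsResponseSketch {
  suggestions: SuggestionSketch[];
  hasMore: boolean;
}

interface ConnectionLike {
  suggestResources(partialPath: string, options: SuggestionOptionsSketch): Promise<SuggestionsResponseSketch>;
}

// Accessors without suggestResourcesForPath come back as an empty list with
// hasMore=false (the graceful fallback above), so callers can treat every
// datasource uniformly.
async function autocomplete(connection: ConnectionLike, partialPath: string): Promise<string[]> {
  const { suggestions, hasMore } = await connection.suggestResources(partialPath, { limit: 20, type: 'all' });
  const labels = suggestions.map((s) => (s.isDirectory ? `${s.path}/` : s.path));
  return hasMore ? [...labels, '(more results available)'] : labels;
}

// Tiny stub standing in for a real DataSourceConnection:
const stub: ConnectionLike = {
  suggestResources: async () => ({ suggestions: [{ path: 'src/main.ts', isDirectory: false }], hasMore: false }),
};
autocomplete(stub, 'src/ma').then((labels) => console.log(labels)); // ["src/main.ts"]
```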
+ * @returns Resource suggestions for autocomplete + */ + override async suggestResourcesForPath( + partialPath: string, + options: ResourceSuggestionsOptions, + ): Promise { + const { limit = 50, caseSensitive = false, type = 'all', followSymlinks: optionsFollowSymlinks } = options; + logger.warn('FilesystemAccessor: Suggesting resources for', { + partialPath, + followSymlinks: optionsFollowSymlinks, + }); + + // Remove leading slash as it's just a trigger, not part of the pattern + const searchPath = partialPath.replace(/^\//, ''); + + // Get exclude patterns + const excludeOptions = await getExcludeOptions(this.rootPath); + const excludePatterns = createExcludeRegexPatterns(excludeOptions, this.rootPath); + + // Generate patterns for matching + const patterns = this.createSuggestionPatterns(searchPath, { caseSensitive, type }); + if (patterns.length === 0) { + logger.info('FilesystemAccessor: No valid patterns generated'); + return { suggestions: [], hasMore: false }; + } + + // Collect matching files + const results: Array = []; + let reachedLimit = false; + + try { + for await ( + const entry of walk(this.rootPath, { + includeDirs: true, + followSymlinks: this.followSymlinks, + match: patterns, + skip: excludePatterns, + }) + ) { + // Check limit before adding + if (results.length >= limit) { + reachedLimit = true; + break; + } + + const stat = await Deno.stat(entry.path); + const relativePath = relative(this.rootPath, entry.path); + + results.push({ + dataSourceRoot: this.rootPath, + path: relativePath, + isDirectory: stat.isDirectory, + size: stat.size, + modified: stat.mtime?.toISOString(), + dataSourceName: this.connection.name, + }); + } + } catch (error) { + logger.error('FilesystemAccessor: Error walking directory', error); + throw createError( + ErrorType.FileHandling, + `Error walking directory: ${(error as Error).message}`, + ); + } + + // Apply type filtering if specified + const filteredResults = results.filter((entry) => { + if (type === 'directory') return entry.isDirectory; + if (type === 'file') return !entry.isDirectory; + return true; + }); + + return { + suggestions: filteredResults, + hasMore: reachedLimit, + }; + } + + /** + * Creates an array of RegExp patterns for matching file suggestions based on partial path input + */ + private createSuggestionPatterns( + partialPath: string, + options: { caseSensitive?: boolean; type?: 'all' | 'file' | 'directory' } = {}, + ): RegExp[] { + // Normalize path separators to forward slashes + partialPath = partialPath.replace(/\\/g, '/'); + + // Reject paths trying to escape root + if (partialPath.includes('../') || partialPath.includes('..\\')) { + logger.warn('FilesystemAccessor: Rejecting path that tries to escape root', { partialPath }); + return []; + } + + const patterns: RegExp[] = []; + const globOptions = { + flags: options.caseSensitive ? '' : 'i', + extended: true, + globstar: true, + }; + + // Helper to create regex with proper case sensitivity + const createRegex = (pattern: string) => { + const flags = options.caseSensitive ? 
'' : 'i'; + return new RegExp(pattern, flags); + }; + + // Handle empty input + if (!partialPath) { + const rootPattern = '**/*'; + patterns.push(globToRegExp(rootPattern, globOptions)); + + if (options.type !== 'file') { + const rootDirPattern = '*/'; + patterns.push(globToRegExp(rootDirPattern, globOptions)); + } + return patterns; + } + + // Handle brace expansion and multiple patterns + const expandBraces = (pattern: string): string[] => { + const match = pattern.match(/{([^}]+)}/); + if (!match) return [pattern]; + + const [fullMatch, alternatives] = match; + const parts = alternatives.split(',').map((p) => p.trim()); + + // Create a regex-compatible OR group + const orGroup = `(${parts.join('|')})`; + return [pattern.replace(fullMatch, orGroup)]; + }; + + // Split by | and handle brace expansion + const subPatterns = partialPath + .split('|') + .flatMap((pattern) => expandBraces(pattern)); + + for (const pattern of subPatterns) { + let singlePattern = pattern; + + // Handle directory patterns + if ( + singlePattern.endsWith('/') || + (!singlePattern.includes('*') && !singlePattern.includes('.') && !singlePattern.includes('{') && + !singlePattern.includes('(')) + ) { + singlePattern = singlePattern.slice(0, -1); + const dirPattern = singlePattern.includes('**') ? `${singlePattern}/**/*` : `**/${singlePattern}*/**/*`; + patterns.push(createRegex(globToRegExp(dirPattern, globOptions).source)); + } + + // Handle bare filename (no path, no wildcards) + if (!singlePattern.includes('/') && !singlePattern.includes('*')) { + const dirPattern = `**/${singlePattern}*/`; + patterns.push(globToRegExp(dirPattern, globOptions)); + + const filesPattern = `**/${singlePattern}*/**/*`; + patterns.push(globToRegExp(filesPattern, globOptions)); + } + + // Handle wildcard patterns + if (singlePattern.includes('*')) { + if (singlePattern.includes('**')) { + const prefixedPattern = singlePattern.startsWith('**/') ? 
singlePattern : `**/${singlePattern}`; + const pattern = globToRegExp(prefixedPattern, { ...globOptions, globstar: true }); + patterns.push(pattern); + } else { + if (singlePattern.includes('.')) { + const prefixedPattern = `**/${singlePattern}`; + patterns.push(createRegex(globToRegExp(prefixedPattern, globOptions).source)); + } else { + const dirPattern = `**/${singlePattern}`; + const contentsPattern = `**/${singlePattern}/**/*`; + patterns.push(globToRegExp(dirPattern, globOptions)); + patterns.push(globToRegExp(contentsPattern, globOptions)); + } + } + } + } + + return patterns; + } + /** * Check if this accessor has a specific capability * @param capability The capability to check for diff --git a/api/src/dataSources/filesystem/filesystemProvider.ts b/api/src/dataSources/filesystem/filesystemProvider.ts index d66ddf72..46a4f776 100644 --- a/api/src/dataSources/filesystem/filesystemProvider.ts +++ b/api/src/dataSources/filesystem/filesystemProvider.ts @@ -16,6 +16,13 @@ import type { DataSourceSearchCapability, } from 'shared/types/dataSource.ts'; import type { AcceptedContentType, AcceptedEditType, ContentTypeGuidance } from 'shared/types/dataSource.ts'; +import { + generateBinaryContentWritingInstructions, + generatePlainTextWritingInstructions, + generateSearchReplaceInstructions, + generateWorkflowInstructions, +} from 'api/utils/datasourceInstructions.ts'; +import type { InstructionFilters } from 'api/types/instructionFilters.ts'; /** * FilesystemProvider for BB-managed filesystem data sources @@ -167,6 +174,252 @@ export class FilesystemProvider extends BBDataSourceProvider { return true; } + /** + * Get detailed instructions for filesystem data source (both writing and editing) + * @param filters Optional filters to customize instruction content + * @returns Comprehensive instruction text with examples specific to filesystem capabilities + */ + getDetailedInstructions(filters?: InstructionFilters): string { + // For backward compatibility, return full instructions if no filters provided + // Filesystem provider primarily supports searchReplace, so filtering is minimal for now + if (!filters) { + return this.generateCompleteInstructions(); + } + + // Add minimal filtering support for utility operations + if (filters.operations && (filters.operations.includes('utility') || filters.operations.includes('rename'))) { + return this.generateUtilityInstructions(); + } + + // For filesystem provider, return full instructions for most cases + // TODO: Implement more granular filtering based on contentTypes, operations, etc. 
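As a rough sketch of where the TODO above could go, operation-based dispatch might look like the following. This is illustrative only: the filter fields and generator names below are assumptions, not the existing `InstructionFilters` API.

```ts
// Hypothetical sketch — field names and generator names are assumptions for illustration.
interface SketchInstructionFilters {
  operations?: Array<'read' | 'write' | 'edit' | 'utility' | 'rename'>;
  contentTypes?: Array<'plainTextContent' | 'binaryContent'>;
}

function selectInstructionSet(
  filters: SketchInstructionFilters,
  generators: Partial<Record<string, () => string>>,
  fallback: () => string,
): string {
  // First matching operation wins; anything unmatched falls back to the complete set.
  for (const op of filters.operations ?? []) {
    const generate = generators[op];
    if (generate) return generate();
  }
  return fallback();
}

// Hypothetical wiring:
// selectInstructionSet(filters, { utility: generateUtilityText, rename: generateUtilityText }, generateCompleteText);
```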
+ return this.generateCompleteInstructions(); + } + + /** + * Generate complete instructions (backward compatibility) + */ + private generateCompleteInstructions(): string { + const instructions = [ + `# Filesystem Complete Instructions\n`, + `## Provider: ${this.name}\n`, + `Filesystem supports both file creation and editing operations.\n`, + `Choose the appropriate operation type based on your needs.\n\n`, + + `# 📝 CREATING NEW FILES\n\n`, + generatePlainTextWritingInstructions(), + '\n', + generateBinaryContentWritingInstructions(), + '\n', + `## Filesystem File Creation Features\n\n`, + `✅ **Text File Creation**: Create code, configuration, documentation files\n`, + `✅ **Binary File Support**: Create images, documents, executables\n`, + `✅ **Directory Creation**: Automatically creates missing parent directories\n`, + `✅ **Path Safety**: All files constrained to configured data source root\n`, + `✅ **Format Preservation**: Maintains exact formatting, whitespace, line endings\n\n`, + `### File Creation Workflow\n\n`, + `1. **Determine file type** (text or binary)\n`, + `2. **Choose content format**:\n`, + ` - Use **plainTextContent** for code, config, documentation, and text files\n`, + ` - Use **binaryContent** for images, PDFs, executables, and binary files\n`, + `3. **Provide complete file content** - no placeholders or partial content\n`, + `4. **Use proper file paths** - relative to data source root\n`, + `5. **Include correct file extensions** - affects MIME type detection\n\n`, + `### Supported File Types\n\n`, + `**Text Files (use plainTextContent):**\n`, + `- **Code**: .ts, .js, .py, .java, .cpp, .rs, .go, .php\n`, + `- **Configuration**: .json, .yaml, .toml, .env, .ini, .config\n`, + `- **Documentation**: .md, .txt, .rst, .org\n`, + `- **Web**: .html, .css, .xml, .svg\n`, + `- **Data**: .csv, .tsv, .sql\n\n`, + `**Binary Files (use binaryContent):**\n`, + `- **Images**: .png, .jpg, .gif, .webp, .ico\n`, + `- **Documents**: .pdf, .docx, .xlsx, .pptx\n`, + `- **Archives**: .zip, .tar, .gz, .7z\n`, + `- **Executables**: .exe, .app, .deb, .rpm\n\n`, + + `# ✏️ EDITING EXISTING FILES\n\n`, + `Filesystem data sources support file-based operations with search and replace editing.\n`, + `Operations work on individual files within the configured root directory.\n\n`, + generateSearchReplaceInstructions(), + '\n', + `## Filesystem Editing Specific Notes\n\n`, + `- **File-based Operations**: Each edit targets a specific file\n`, + `- **Path Restrictions**: All paths must be within the configured data source root\n`, + `- **Text Files Only**: Binary files cannot be edited (use write_resource to replace)\n`, + `- **Exact Matching**: Search patterns must match file content exactly\n`, + `- **Whitespace Sensitive**: Include all spaces, tabs, and newlines in search patterns\n`, + `- **Regex Support**: Use searchReplace_regexPattern=true for complex patterns\n\n`, + `### File Editing Workflow\n\n`, + `1. **Always load the file first** to see current content\n`, + `2. **Plan your changes** based on exact file content\n`, + `3. **Use precise search patterns** matching whitespace exactly\n`, + `4. 
**Apply search/replace operations** for text modifications\n\n`, + + `# 🔍 FINDING RESOURCES\n\n`, + `Filesystem supports comprehensive resource discovery with advanced file system search capabilities.\n`, + `The system provides powerful content and metadata search across the entire directory tree.\n\n`, + `## Filesystem Search Capabilities\n\n`, + `✅ **Directory Tree Search**: Recursive search through all subdirectories\n`, + `✅ **Content Search**: Full-text search within file contents (grep-style)\n`, + `✅ **Glob Pattern Support**: Advanced file name and path pattern matching\n`, + `✅ **Regex Support**: Complete regex support for content and name patterns\n`, + `✅ **Multi-file Type Support**: Searches across all text file types\n`, + `✅ **Exclusion Handling**: Automatically excludes .git, node_modules, build artifacts\n\n`, + `## Search Types and Patterns\n\n`, + `**Content Pattern Search**: Searches within file contents using grep-compatible regex\n`, + `**Resource Pattern Search**: Filters files by name/path using glob patterns\n`, + `**Date/Size Filtering**: Filter by modification date, creation date, file size\n`, + `**Extension Filtering**: Target specific file types with structured queries\n\n`, + `### Search Examples\n\n`, + `\`\`\`\n`, + `# Find functions in TypeScript files\n`, + `contentPattern: "function.*search" # Regex pattern\n`, + `resourcePattern: "**/*.ts" # TypeScript files anywhere\n`, + `regexPattern: true\n\n`, + `# Find configuration files\n`, + `resourcePattern: "**/config.*" # Config files at any depth\n\n`, + `# Find recent changes with TODO\n`, + `contentPattern: "TODO|FIXME|BUG" # Multiple patterns\n`, + `dateAfter: "2024-08-01"\n`, + `regexPattern: true\n\n`, + `# Find large files by extension\n`, + `structuredQuery: {\n`, + ` "filters": {\n`, + ` "extension": ".js",\n`, + ` "sizeMin": 1048576 // > 1MB\n`, + ` }\n`, + `}\n`, + `\`\`\`\n\n`, + `### Advanced Glob Patterns\n\n`, + `| Pattern | Matches | Example |\n`, + `|---------|---------|---------|\n`, + `| \`*.ts\` | TypeScript files in current dir | \`config.ts\` |\n`, + `| \`**/*.ts\` | TypeScript files anywhere | \`src/utils/helper.ts\` |\n`, + `| \`src/**/*.test.ts\` | Test files in src tree | \`src/components/Button.test.ts\` |\n`, + `| \`**/config.*\` | Config files anywhere | \`config.json\`, \`app/config.yaml\` |\n`, + `| \`{*.js,*.ts}\` | JavaScript or TypeScript | \`app.js\`, \`types.ts\` |\n\n`, + `### Search Capabilities & Limitations\n\n`, + `| Feature | Supported | Implementation |\n`, + `|---------|-----------|---------------|\n`, + `| File name/path search | ✅ | Native glob pattern matching |\n`, + `| File content search | ✅ | Grep-style regex search |\n`, + `| Date filtering | ✅ | File modification and creation time |\n`, + `| Size filtering | ✅ | File size in bytes with min/max |\n`, + `| Regex patterns | ✅ | Full JavaScript regex support |\n`, + `| Content context | ✅ | Configurable context lines before/after |\n`, + `| Case sensitivity | ✅ | Configurable for both content and names |\n`, + `| Binary file handling | ⚠️ | Automatically excluded from content search |\n`, + `| Gitignore respect | ✅ | Excludes ignored files automatically |\n`, + `| Symlink handling | ✅ | Configurable symlink following |\n\n`, + `### Result Detail Levels\n\n`, + `- **resource**: File list with metadata only\n`, + `- **container**: Files with directory structure information\n`, + `- **fragment**: Files with content snippets showing matches\n`, + `- **detailed**: Full context with configurable lines before/after 
matches\n\n`, + `### Best Practices for File Discovery\n\n`, + `1. **Use Specific Patterns**: Target exact file types and directories for better performance\n`, + `2. **Combine Approaches**: Use both content and resource patterns for precise targeting\n`, + `3. **Leverage Exclusions**: System automatically excludes .git, node_modules, build folders\n`, + `4. **Size Awareness**: Use size filters to find large files or exclude them from content search\n`, + `5. **Date Filtering**: Focus searches on recent changes or specific time periods\n`, + `6. **Context Control**: Adjust contextLines to get appropriate surrounding content\n`, + `7. **Performance**: Use specific patterns rather than broad searches for large codebases\n\n`, + `### Performance Considerations\n\n`, + `- **Content searches** scan file contents and can be slower for large repositories\n`, + `- **Resource pattern searches** only check filenames and are much faster\n`, + `- **Exclusion patterns** significantly improve performance by skipping irrelevant files\n`, + `- **Binary files** are automatically excluded from content searches\n`, + `- **Large files** (>10MB) may cause timeouts in content searches\n\n`, + `# 🔄 GENERAL WORKFLOW\n\n`, + generateWorkflowInstructions(), + '\n', + `## Filesystem Best Practices\n\n`, + `✅ **For new files**: Choose appropriate content type (plain text vs binary)\n`, + `✅ **For code files**: Use plainTextContent with proper formatting and line counts\n`, + `✅ **For existing files**: Load first to see current content structure\n`, + `✅ **For text changes**: Use search/replace with exact pattern matching\n`, + `✅ **For binary files**: Use write_resource to replace entire file\n`, + `✅ **For safety**: All operations are constrained to data source root\n\n`, + ].join(''); + + return instructions; + } + + /** + * Generate utility operation instructions (rename, move, remove) + */ + private generateUtilityInstructions(): string { + const instructions = [ + `# Filesystem Utility Operations Instructions\n`, + `## Provider: ${this.name}\n`, + `Filesystem supports rename, move, and remove operations with standard path formats.\n\n`, + `# 🔄 RENAMING RESOURCES\n\n`, + `Filesystem supports straightforward renaming of files and directories.\n`, + `All paths use standard relative filesystem paths from the data source root.\n\n`, + `## Path Format Requirements\n\n`, + `📁 **FILESYSTEM PATH FORMAT**:\n`, + `- ✅ **Files**: "src/config.ts", "tests/helper.test.ts"\n`, + `- ✅ **Directories**: "src/utils", "docs/api"\n`, + `- ✅ **Nested paths**: "project/src/components/Button.tsx"\n`, + `- ✅ **Extensions**: Always include file extensions (.ts, .js, .md, etc.)\n\n`, + `🔒 **PATH SECURITY**:\n`, + `- All paths must be relative to data source root\n`, + `- Cannot access files outside the configured directory\n`, + `- Parent directory traversal (..) 
is not allowed\n\n`, + `## Rename Examples\n\n`, + `\`\`\`json\n`, + `// Single file rename\n`, + `{\n`, + ` "operations": [{\n`, + ` "source": "src/oldFile.ts",\n`, + ` "destination": "src/newFile.ts"\n`, + ` }]\n`, + `}\n\n`, + `// Directory rename\n`, + `{\n`, + ` "operations": [{\n`, + ` "source": "old-utils",\n`, + ` "destination": "utils"\n`, + ` }]\n`, + `}\n`, + `\`\`\`\n\n`, + `## Safety Features\n\n`, + `✅ **Overwrite Protection**: Set overwrite=false by default\n`, + `✅ **Directory Creation**: Set createMissingDirectories=true to create parent dirs\n`, + `✅ **Path Validation**: All paths validated against data source root\n`, + `✅ **Atomic Operations**: All rename operations complete or fail together\n\n`, + `## Common Issues & Solutions\n\n`, + `🔴 **"File not found" Errors**:\n`, + `- Source path doesn't exist or is outside data source root\n`, + `- Use \`find_resources\` to locate the correct file path\n`, + `- Verify path is relative to data source root, not absolute\n\n`, + `🔴 **"Permission denied" Errors**:\n`, + `- File or directory permissions prevent filesystem operations\n`, + `- Check that BB has read/write access to the filesystem location\n`, + `- Ensure destination directory has write permissions\n\n`, + `🔴 **"Path outside data source" Errors**:\n`, + `- Attempted to access files outside the configured root directory\n`, + `- Use relative paths only (e.g., "src/file.ts" not "/usr/src/file.ts")\n`, + `- Cannot use .. to traverse parent directories\n\n`, + `# 📦 MOVING RESOURCES\n\n`, + `Move operations relocate files/directories to different locations within the data source.\n`, + `Use standard filesystem paths for both source and destination.\n\n`, + `# 🗑️ REMOVING RESOURCES\n\n`, + `Remove operations delete files and directories from the filesystem.\n`, + `Use standard filesystem paths for resources to delete.\n\n`, + `⚠️ **CRITICAL**: Filesystem deletions are permanent - no trash/undo functionality.\n\n`, + `# 💡 BEST PRACTICES\n\n`, + `1. **Always load resources first** using find_resources to verify paths\n`, + `2. **Use relative paths** from data source root (never absolute paths)\n`, + `3. **Include file extensions** in all file paths for clarity\n`, + `4. **Check for references** - update imports/requires after moving files\n`, + `5. **Batch related operations** to maintain consistency\n\n`, + ].join(''); + + return instructions; + } + /** * Get content type guidance for filesystem data source * @returns ContentTypeGuidance with filesystem-specific examples and constraints @@ -240,6 +493,72 @@ export class FilesystemProvider extends BBDataSourceProvider { }; } + /** + * Get filesystem-specific error guidance for LLM tool usage + * Provides enhanced error messaging focused on path and permission issues + */ + override getErrorGuidance( + errorType: + | 'not_found' + | 'permission_denied' + | 'invalid_format' + | 'workflow_violation' + | 'configuration' + | 'unknown', + operation: string, + hasLoadedInstructions: boolean = false, + ): { message: string; type: 'workflow' | 'instructions' | 'configuration' | 'format' } { + // Base instruction reminder + const instructionsReminder = hasLoadedInstructions + ? "💡 Review the filesystem datasource instructions you've loaded, especially the workflow sections." 
+ : "🔍 **Load filesystem datasource instructions first**: Use `loadDatasource` with `returnType='instructions'` to get detailed filesystem workflows and requirements."; + + // Filesystem-specific guidance based on error type + switch (errorType) { + case 'not_found': + return { + message: + `${instructionsReminder}\n\n📋 **Filesystem resource not found**: This typically indicates:\n- File or directory doesn't exist at the specified path\n- Path is outside the configured data source root directory\n- **Recommendation**: Use \`find_resources\` to search for the file or verify the correct path`, + type: 'workflow', + }; + + case 'permission_denied': + return { + message: + `${instructionsReminder}\n\n🔐 **Filesystem access denied**: This usually means:\n- File or directory permissions don't allow ${operation} access\n- Path is outside the configured data source root (security restriction)\n- **Check**: Verify the path is within the data source root and has appropriate file permissions`, + type: 'configuration', + }; + + case 'invalid_format': + return { + message: + `${instructionsReminder}\n\n⚠️ **Filesystem format issue**: For filesystem operations:\n- Use plainTextContent for text files (code, config, docs)\n- Use binaryContent for images, PDFs, executables\n- **Note**: Binary files cannot be edited - use \`write_resource\` to replace them`, + type: 'format', + }; + + case 'workflow_violation': + return { + message: + `${instructionsReminder}\n\n⚠️ **Filesystem workflow issue**: Common problems:\n- **For editing**: Load the file first to see current content, then use exact search patterns\n- **For new files**: Ensure parent directories exist or set \`createMissingDirectories=true\`\n- **Path format**: Use relative paths from data source root (e.g., "src/file.ts")`, + type: 'workflow', + }; + + case 'configuration': + return { + message: + `${instructionsReminder}\n\n⚙️ **Filesystem configuration issue**: Check:\n- Data source root directory is correctly configured and accessible\n- \`strictRoot\` and \`followSymlinks\` settings are appropriate\n- File system permissions allow BB to access the directory`, + type: 'configuration', + }; + + default: + return { + message: + `${instructionsReminder}\n\n🔧 **Filesystem ${operation} operation failed**: Filesystem operations are straightforward:\n- **Files must be within the data source root** (security boundary)\n- **Load files first** to see current content before editing\n- **Use exact search patterns** for text file modifications`, + type: 'workflow', + }; + } + } + /** * Create a filesystem data source with the specified configuration * @param name Human-readable name for the data source diff --git a/api/src/dataSources/interfaces/dataSourceConnection.ts b/api/src/dataSources/interfaces/dataSourceConnection.ts index 74dc6738..5ffe4459 100644 --- a/api/src/dataSources/interfaces/dataSourceConnection.ts +++ b/api/src/dataSources/interfaces/dataSourceConnection.ts @@ -3,10 +3,16 @@ * DataSourceConnection represents a specific configured instance of a data source. 
*/ import type { DataSourceProvider } from 'api/dataSources/interfaces/dataSourceProvider.ts'; -import type { DataSourceAccessMethod, DataSourceCapability, DataSourceProviderType } from 'shared/types/dataSource.ts'; +import type { + DataSourceAccessMethod, + DataSourceCapability, + DataSourceConfig, + DataSourceProviderType, +} from 'shared/types/dataSource.ts'; import type { AuthConfig } from 'api/dataSources/interfaces/authentication.ts'; import type { ResourceAccessor } from 'api/dataSources/interfaces/resourceAccessor.ts'; import type { ProjectConfig } from 'shared/config/types.ts'; +import type { ValidationMode } from 'shared/types/resourceValidation.ts'; /** * DataSourceConnection interface @@ -56,7 +62,7 @@ export interface DataSourceConnection { /** * Provider-specific configuration */ - config: Record; + config: DataSourceConfig; /** * Authentication details (optional) @@ -83,6 +89,12 @@ export interface DataSourceConnection { */ update(updates: Partial): void; + /** + * Get the URI for this data source + * @returns The URI to use for resources from this data source + */ + getUriForResource(resourceUri: string): string; + /** * Get a ResourceAccessor for a data source connection * @returns A ResourceAccessor instance @@ -91,8 +103,10 @@ export interface DataSourceConnection { /** * Is resource path within the data source - not an exists test, just valid path within data source + * @param resourceUri URI to validate + * @param mode Validation mode to use (strict, lenient, or partial) */ - isResourceWithinDataSource(resourceUri: string): Promise; + isResourceWithinDataSource(resourceUri: string, mode?: ValidationMode): Promise; /** * Does resource exist in the data source @@ -132,7 +146,7 @@ export interface DataSourceConnectionValues { providerType: DataSourceProviderType; accessMethod: DataSourceAccessMethod; name: string; - config: Record; + config: DataSourceConfig; auth?: AuthConfig; enabled: boolean; isPrimary: boolean; @@ -151,7 +165,7 @@ export interface DataSourceConnectionSystemPrompt { providerType: DataSourceProviderType; accessMethod: DataSourceAccessMethod; name: string; - config: Record; + config: DataSourceConfig; capabilities: DataSourceCapability[]; description?: string; isPrimary: boolean; diff --git a/api/src/dataSources/interfaces/dataSourceProvider.ts b/api/src/dataSources/interfaces/dataSourceProvider.ts index 0abb3fa0..22f80255 100644 --- a/api/src/dataSources/interfaces/dataSourceProvider.ts +++ b/api/src/dataSources/interfaces/dataSourceProvider.ts @@ -11,6 +11,13 @@ import type { DataSourceCapability, } from 'shared/types/dataSource.ts'; import type { AcceptedContentType, AcceptedEditType, ContentTypeGuidance } from 'shared/types/dataSource.ts'; +import type { InstructionFilters } from 'api/types/instructionFilters.ts'; +import type { + URIConstructionOptions, + URIContext, + ValidationMode, + ValidationResult, +} from 'shared/types/resourceValidation.ts'; /** * DataSourceProvider interface @@ -109,4 +116,58 @@ export interface DataSourceProvider { * @returns ContentTypeGuidance object with usage examples and constraints */ getContentTypeGuidance(): ContentTypeGuidance; + + /** + * Get detailed editing instructions for LLM tool usage + * Must be implemented by subclasses to provide comprehensive editing guidance + * @param filters Optional filters to customize instruction content + * @returns Detailed instruction text with provider-specific examples and workflows + */ + getDetailedInstructions(filters?: InstructionFilters): string; + + /** + * Get 
error-specific guidance for LLM tool usage + * Provides context-aware error messaging based on error type and operation + * @param errorType The type of error encountered + * @param operation The operation that failed + * @param hasLoadedInstructions Whether the LLM has already loaded instructions + * @returns Object with enhanced error message and guidance type + */ + getErrorGuidance( + errorType: + | 'not_found' + | 'permission_denied' + | 'invalid_format' + | 'workflow_violation' + | 'configuration' + | 'unknown', + operation: string, + hasLoadedInstructions?: boolean, + ): { message: string; type: 'workflow' | 'instructions' | 'configuration' | 'format' }; + + /** + * Build a datasource-specific URI from a partial path + * Each provider can implement its own URI construction logic + * @param partialPath Partial path or resource identifier + * @param options Construction options with context + * @returns Properly formatted URI for this datasource type + */ + buildResourceUri?(partialPath: string, options: URIConstructionOptions): string; + + /** + * Validate a resource URI with datasource-specific rules + * @param resourceUri URI to validate + * @param mode Validation mode to use + * @returns Detailed validation result + */ + validateResourceUri?(resourceUri: string, mode?: ValidationMode): ValidationResult; + + /** + * Check if a partial path could be valid for this datasource + * Used for autocomplete and suggestion filtering + * @param partialPath Partial path being typed + * @param context Context for the validation + * @returns Whether the partial path is potentially valid + */ + isPartialPathValid?(partialPath: string, context?: URIContext): boolean; } diff --git a/api/src/dataSources/interfaces/resourceAccessor.ts b/api/src/dataSources/interfaces/resourceAccessor.ts index 74aeb6c5..ffc0edcd 100644 --- a/api/src/dataSources/interfaces/resourceAccessor.ts +++ b/api/src/dataSources/interfaces/resourceAccessor.ts @@ -24,6 +24,15 @@ import type { } from 'shared/types/dataSourceResource.ts'; import type { DataSourceMetadata } from 'shared/types/dataSource.ts'; import type { PortableTextBlock } from 'api/types/portableText.ts'; +import type { TabularSheet } from 'api/types/tabular.ts'; +import type { + EnhancedURIParseResult, + URIConstructionOptions, + URIContext, + ValidationMode, + ValidationResult, +} from 'shared/types/resourceValidation.ts'; +import type { ResourceSuggestionsOptions, ResourceSuggestionsResponse } from '../../utils/resourceSuggestions.utils.ts'; /** * ResourceAccessor interface @@ -43,11 +52,36 @@ export interface ResourceAccessor { /** * Check if resource exists in the data source for this accessor * @param resourceUri The resource URI to check + * @param mode Validation mode to use * @returns boolean */ - isResourceWithinDataSource(resourceUri: string): Promise; + isResourceWithinDataSource(resourceUri: string, mode?: ValidationMode): Promise; resourceExists(resourceUri: string, options?: { isFile?: boolean }): Promise; + /** + * Build a resource URI for this datasource with context awareness + * @param partialPath Partial path or resource identifier + * @param options Construction options with context + * @returns Properly formatted URI for this datasource + */ + buildResourceUri?(partialPath: string, options: URIConstructionOptions): Promise; + + /** + * Parse and validate a resource URI with enhanced context + * @param resourceUri URI to parse and validate + * @param mode Validation mode to use + * @returns Enhanced parsing result with validation context + */ 
+ parseResourceUri?(resourceUri: string, mode?: ValidationMode): Promise; + + /** + * Validate a resource URI with detailed feedback + * @param resourceUri URI to validate + * @param mode Validation mode to use + * @returns Detailed validation result + */ + validateResourceUri?(resourceUri: string, mode?: ValidationMode): Promise; + /** * Ensure resource path exists in the data source for this accessor * @param resourceUri The resource URI to check @@ -106,7 +140,7 @@ export interface ResourceAccessor { */ writeResource?( resourceUri: string, - content: string | Uint8Array | PortableTextBlock[], + content: string | Uint8Array | Array | Array, options?: ResourceWriteOptions, ): Promise; @@ -144,6 +178,17 @@ export interface ResourceAccessor { */ deleteResource?(resourceUri: string, options?: ResourceDeleteOptions): Promise; + /** + * Suggest resources for autocomplete based on partial path (optional capability) + * @param partialPath Partial path input from user + * @param options Suggestion options (limit, filters, etc.) + * @returns Resource suggestions for autocomplete + */ + suggestResourcesForPath?( + partialPath: string, + options: ResourceSuggestionsOptions, + ): Promise; + /** * Check if this accessor has a specific capability * @param capability The capability to check for diff --git a/api/src/dataSources/mcp/genericMCPProvider.ts b/api/src/dataSources/mcp/genericMCPProvider.ts index 50c3fe24..6e2975fd 100644 --- a/api/src/dataSources/mcp/genericMCPProvider.ts +++ b/api/src/dataSources/mcp/genericMCPProvider.ts @@ -18,6 +18,7 @@ import type { DataSourceSearchCapability, } from 'shared/types/dataSource.ts'; import type { AcceptedContentType, AcceptedEditType, ContentTypeGuidance } from 'shared/types/dataSource.ts'; +import { generateWorkflowInstructions } from 'api/utils/datasourceInstructions.ts'; /** * GenericMCPProvider for MCP-managed data sources @@ -145,6 +146,56 @@ export class GenericMCPProvider extends MCPDataSourceProvider { logger.debug(`GenericMCPProvider: Created provider for MCP server ${serverId}`); } + /** + * Get detailed editing instructions for MCP data source + * @returns Comprehensive instruction text with examples specific to MCP capabilities + */ + getDetailedInstructions(): string { + const instructions = [ + `# MCP Data Source Instructions\n`, + `## Provider: ${this.name} (MCP Server: ${this.serverId})\n`, + `This data source is managed by an external Model Context Protocol (MCP) server.\n`, + `Operations and capabilities are determined by the MCP server implementation.\n\n`, + `## MCP-Specific Notes\n\n`, + `- **External Management**: Data operations are handled by the MCP server at \"${this.serverId}\"\n`, + `- **Server-Dependent Capabilities**: Available operations depend on server implementation\n`, + `- **Tool-Based Operations**: Use the \"mcp\" tool for interacting with this data source\n`, + `- **Server Capabilities**: Current capabilities include: [${this.getServerCapabilities().join(', ')}]\n\n`, + `### Using MCP Tools\n\n`, + `MCP data sources are accessed using the dedicated MCP tool:\n\n`, + `\`\`\`json\n`, + `{\n`, + ` "tool": "mcp",\n`, + ` "serverName": "${this.serverId}",\n`, + ` "toolName": "server-specific-tool-name",\n`, + ` "toolInput": {\n`, + ` // Parameters specific to the MCP server's tool\n`, + ` }\n`, + `}\n`, + `\`\`\`\n\n`, + `### Server-Specific Documentation\n\n`, + `- **Check Server Documentation**: Each MCP server has its own tools and parameters\n`, + `- **Available Tools**: Use MCP discovery to find available tools for this 
server\n`, + `- **Tool Schemas**: Each tool defines its own input/output schema\n`, + `- **Error Handling**: Server-specific error messages and handling\n\n`, + `### Common MCP Operations\n\n`, + `Most MCP servers support these general patterns:\n\n`, + `- **Resource Listing**: Tools to list available resources\n`, + `- **Resource Reading**: Tools to read/fetch resource content\n`, + `- **Search Operations**: Tools to search within the data source\n`, + `- **Custom Operations**: Server-specific business logic and workflows\n\n`, + generateWorkflowInstructions(), + '\n', + `### MCP-Specific Workflow\n\n`, + `1. **Discover Available Tools**: Use MCP discovery to find server capabilities\n`, + `2. **Check Tool Schemas**: Review input/output requirements for each tool\n`, + `3. **Use MCP Tool**: Call tools via the \"mcp\" tool with proper parameters\n`, + `4. **Handle Server Responses**: Process server-specific response formats\n\n`, + ].join(''); + + return instructions; + } + /** * Get content type guidance for MCP data source * @returns ContentTypeGuidance with MCP-specific information diff --git a/api/src/editor/projectEditor.ts b/api/src/editor/projectEditor.ts index 6528690d..b1c911d9 100644 --- a/api/src/editor/projectEditor.ts +++ b/api/src/editor/projectEditor.ts @@ -4,7 +4,7 @@ import type LLMConversationInteraction from 'api/llms/conversationInteraction.ts import type { ProjectInfo as BaseProjectInfo } from 'api/llms/conversationInteraction.ts'; //import type { LLMMessageContentPartImageBlockSourceMediaType } from 'api/llms/llmMessage.ts'; import OrchestratorController from 'api/controllers/orchestratorController.ts'; -import type { SessionManager } from 'api/auth/session.ts'; +import type { UserContext } from 'shared/types/app.ts'; import { logger } from 'shared/logger.ts'; import { createError, ErrorType } from 'api/utils/error.ts'; import type { ProjectHandlingErrorOptions } from 'api/errors/error.ts'; @@ -28,6 +28,8 @@ import type { import type { ProjectConfig } from 'shared/config/types.ts'; import type { StatementParams } from 'shared/types/collaboration.ts'; import type { CollaborationId, CollaborationResponse, InteractionId, ProjectId } from 'shared/types.ts'; +import type { SamplingCreateMessageParams } from 'api/types/mcp.ts'; +import type { LLMSpeakWithResponse } from 'api/types.ts'; //import type { LLMRequestParams } from 'api/types/llms.ts'; import type { LLMToolManagerToolSetType } from '../llms/llmToolManager.ts'; import { getBbDir, resolveDataSourceFilePath } from 'shared/dataDir.ts'; @@ -53,7 +55,7 @@ class ProjectEditor { public eventManager!: EventManager; public mcpManager!: MCPManager; public resourceManager!: ResourceManager; - public sessionManager: SessionManager; + public userContext: UserContext; public projectId: ProjectId; public toolSet: LLMToolManagerToolSetType = 'coding'; @@ -66,11 +68,11 @@ class ProjectEditor { //tier: null, }; - constructor(projectId: ProjectId, sessionManager: SessionManager) { + constructor(projectId: ProjectId, userContext: UserContext) { this.projectId = projectId; this._projectInfo.projectId = projectId; - this.sessionManager = sessionManager; - //logger.info('ProjectEditor: sessionManager', sessionManager); + this.userContext = userContext; + //logger.info('ProjectEditor: userContext', userContext); } public async init(): Promise { @@ -245,6 +247,7 @@ class ProjectEditor { this._projectInfo = projectInfo; } + // deno-lint-ignore require-await public async updateProjectInfo(): Promise { // If we've already generated the 
metadata, skip regeneration if (this.projectInfo.type === 'metadata') { @@ -371,6 +374,37 @@ class ProjectEditor { return statementAnswer; } + /** + * Handle MCP sampling request + * Similar to handleStatement but for MCP sampling requests + */ + async handleSamplingRequest( + params: SamplingCreateMessageParams, + collaborationId: CollaborationId, + mcpServerDetails: { name?: string; id: string }, + statementParams?: StatementParams, + ): Promise { + const interaction = await this.initInteraction(collaborationId); + logger.info( + `ProjectEditor: Initialized collaboration with ID: ${collaborationId} using interaction with ID ${interaction.id}, handling sampling request for server ${ + mcpServerDetails.name || mcpServerDetails.id + }`, + ); + + // Convert statementParams to speakOptions if provided + const speakOptions = statementParams?.rolesModelConfig?.orchestrator || undefined; + + const samplingResponse = await this.orchestratorController.generateSamplingResponse( + params, + interaction.collaboration, + interaction.id, + mcpServerDetails, + speakOptions, + ); + + return samplingResponse; + } + // prepareResourcesForInteraction is called by load_resources tool // only existing resources can be prepared and added, otherwise call write_resource tools with createIfMissing:true async prepareResourcesForInteraction( @@ -384,9 +418,11 @@ class ProjectEditor { // Always load from original source to ensure we have the latest version logger.info(`ProjectEditor: Get resource for: ${resourceUri}`); const resource = await this.resourceManager.loadResource(resourceUri, options); + //logger.info(`ProjectEditor: Got resource for: ${resourceUri}`); // Store at project level for future reference await this.projectData.storeProjectResource(resourceUri, resource.content, resource.metadata); + //logger.info(`ProjectEditor: Stored resource for: ${resourceUri}`); // Extract resource name from metadata or use URI as fallback const resourceName = resource.metadata?.name || resourceUri; diff --git a/api/src/editor/projectEditorManager.ts b/api/src/editor/projectEditorManager.ts index 549c0dd6..43f328af 100644 --- a/api/src/editor/projectEditorManager.ts +++ b/api/src/editor/projectEditorManager.ts @@ -1,6 +1,6 @@ import ProjectEditor from 'api/editor/projectEditor.ts'; import type { CollaborationId, ProjectId } from 'shared/types.ts'; -import type { SessionManager } from 'api/auth/session.ts'; +import type { UserContext } from 'shared/types/app.ts'; import { errorMessage } from 'shared/error.ts'; import { logger } from 'shared/logger.ts'; @@ -8,18 +8,23 @@ class ProjectEditorManager { private projectEditors: Map = new Map(); // Track in-progress editor creations by projectId private pendingEditorCreations: Map> = new Map(); + // Track current editor for getCurrentEditor method + private currentEditorKey: string | undefined = undefined; + private currentCollaborationId: CollaborationId | undefined = undefined; + // Configuration for when to update current editor: 'create', 'get', or 'both' + private trackCurrentEditorOn: 'create' | 'get' | 'both' = 'both'; async getOrCreateEditor( projectId: ProjectId, collaborationId: CollaborationId | undefined, - sessionManager: SessionManager, + userContext: UserContext, ): Promise { if (!collaborationId) { throw new Error('CollaborationId is required to create a new ProjectEditor'); } // Create a composite key using both projectId and collaborationId - const editorKey = `${projectId}-${collaborationId}`; + const editorKey = `${projectId}|${collaborationId}`; if 
(this.projectEditors.has(editorKey)) { return this.projectEditors.get(editorKey)!; @@ -31,7 +36,7 @@ class ProjectEditorManager { if (!editorCreationPromise) { // Only create a new promise if one doesn't exist yet logger.info(`ProjectEditorManager: Creating projectEditor for ${editorKey}`); - editorCreationPromise = this.createEditorWithLock(editorKey, projectId, collaborationId, sessionManager); + editorCreationPromise = this.createEditorWithLock(editorKey, projectId, collaborationId, userContext); this.pendingEditorCreations.set(editorKey, editorCreationPromise); // Since we created this promise, we're responsible for cleanup @@ -55,6 +60,13 @@ class ProjectEditorManager { try { const projectEditor = await editorCreationPromise; if (!projectEditor) throw new Error('Unable to get ProjectEditor'); + + // Update current editor tracking if enabled for 'create' operations + if (this.trackCurrentEditorOn === 'create' || this.trackCurrentEditorOn === 'both') { + this.currentEditorKey = editorKey; + this.currentCollaborationId = collaborationId; + } + logger.info(`ProjectEditorManager: Returning projectEditor for ${editorKey}`); return projectEditor; } catch (error) { @@ -67,10 +79,10 @@ class ProjectEditorManager { editorKey: string, projectId: ProjectId, collaborationId: CollaborationId, - sessionManager: SessionManager, + userContext: UserContext, ): Promise { //try { - const projectEditor = await new ProjectEditor(projectId, sessionManager).init(); + const projectEditor = await new ProjectEditor(projectId, userContext).init(); this.projectEditors.set(editorKey, projectEditor); await projectEditor.initCollaboration(collaborationId); return projectEditor; @@ -80,21 +92,114 @@ class ProjectEditorManager { } getEditor(collaborationId: CollaborationId, projectId?: string): ProjectEditor | undefined { - const editorKey = projectId ? `${projectId}-${collaborationId}` : collaborationId; - return this.projectEditors.get(editorKey); + const editorKey = projectId ? `${projectId}|${collaborationId}` : collaborationId; + const projectEditor = this.projectEditors.get(editorKey); + + // Update current editor tracking if enabled for 'get' operations + if (projectEditor && (this.trackCurrentEditorOn === 'get' || this.trackCurrentEditorOn === 'both')) { + this.currentEditorKey = editorKey; + this.currentCollaborationId = collaborationId; + } + + return projectEditor; + } + + /** + * Get the current ProjectEditor without requiring a collaborationId. + * Returns the most recently used editor (if tracking is enabled) or the first created editor as fallback. 
+ * @returns Object with projectEditor and collaborationId, or undefined if no editors exist + */ + getCurrentEditorWithCollaborationId(): + | { projectEditor: ProjectEditor; collaborationId: CollaborationId } + | undefined { + // First, try to return the tracked current editor + if (this.currentEditorKey && this.currentCollaborationId) { + const projectEditor = this.projectEditors.get(this.currentEditorKey); + if (projectEditor) { + return { + projectEditor, + collaborationId: this.currentCollaborationId, + }; + } + } + + // Fallback: return the first available editor if no current is tracked or current is stale + const firstEntry = this.projectEditors.entries().next(); + if (!firstEntry.done) { + const [editorKey, projectEditor] = firstEntry.value; + // Parse collaborationId from the composite key + // Key format is either "${projectId}|${collaborationId}" or just "${collaborationId}" + const dashIndex = editorKey.lastIndexOf('|'); + const collaborationId = dashIndex > 0 ? editorKey.substring(dashIndex + 1) : editorKey; + + return { + projectEditor, + collaborationId, + }; + } + + // No editors available + return undefined; + } + + /** + * Get the current ProjectEditor without requiring a collaborationId. + * Returns the most recently used editor (if tracking is enabled) or the first created editor as fallback. + * @returns projectEditor or undefined if no editors exist + */ + getCurrentEditor(): ProjectEditor | undefined { + const editorWithCollabId = this.getCurrentEditorWithCollaborationId(); + if (editorWithCollabId) return editorWithCollabId.projectEditor; + // No editors available + return undefined; + } + + /** + * Get the current CollaborationId. + * Returns the most recently used ID (if tracking is enabled) or the first ID as fallback. + * @returns CollaborationId or undefined if no ID exists + */ + getCurrentCollaborationId(): CollaborationId | undefined { + const editorWithCollabId = this.getCurrentEditorWithCollaborationId(); + if (editorWithCollabId) return editorWithCollabId.collaborationId; + // No CollaborationId available + return undefined; } releaseEditor(collaborationId: CollaborationId, projectId?: string): void { - const editorKey = projectId ? `${projectId}-${collaborationId}` : collaborationId; + const editorKey = projectId ? `${projectId}|${collaborationId}` : collaborationId; + + // Clear current editor tracking if we're releasing the current editor + if (this.currentEditorKey === editorKey) { + this.currentEditorKey = undefined; + this.currentCollaborationId = undefined; + } + this.projectEditors.delete(editorKey); } isCollaborationActive(collaborationId: CollaborationId | undefined, projectId?: string): boolean { if (!collaborationId) return false; - const editorKey = projectId && collaborationId ? `${projectId}-${collaborationId}` : collaborationId; + const editorKey = projectId && collaborationId ? `${projectId}|${collaborationId}` : collaborationId; return editorKey ? 
this.projectEditors.has(editorKey) : false; } + /** + * Configure when to track current editor updates + * @param mode 'create' tracks on creation, 'get' tracks on access, 'both' tracks on both + */ + setTrackCurrentEditorOn(mode: 'create' | 'get' | 'both'): void { + this.trackCurrentEditorOn = mode; + } + + /** + * Get current tracking configuration + * @returns Current tracking mode + */ + getTrackCurrentEditorOn(): 'create' | 'get' | 'both' { + return this.trackCurrentEditorOn; + } + /** * Returns all active ProjectEditor instances * @returns A Map of collaborationId to ProjectEditor diff --git a/api/src/errors/error.ts b/api/src/errors/error.ts index 241ecbc8..49867f98 100644 --- a/api/src/errors/error.ts +++ b/api/src/errors/error.ts @@ -7,11 +7,13 @@ export { isError } from 'shared/error.ts'; export enum ErrorType { CommandExecution = 'CommandExecution', Persistence = 'PersistenceError', + Authentication = 'AuthenticationError', API = 'APIError', LLM = 'LLMError', LLMRateLimit = 'RateLimitError', LLMValidation = 'ValidationError', ToolHandling = 'ToolHandlingError', + MCPServer = 'MCPServerError', ProjectHandling = 'ProjectHandlingError', DataSourceHandling = 'DataSourceHandlingError', FileHandling = 'FileHandlingError', @@ -35,10 +37,12 @@ export enum ErrorType { export const ErrorTypes = [ ErrorType.API, ErrorType.Persistence, + ErrorType.Authentication, ErrorType.LLM, ErrorType.LLMRateLimit, ErrorType.LLMValidation, ErrorType.ToolHandling, + ErrorType.MCPServer, ErrorType.ProjectHandling, ErrorType.DataSourceHandling, @@ -71,6 +75,13 @@ export interface ErrorOptions { name: string; } +export interface AuthenticationErrorOptions extends ErrorOptions { + userId?: string; + method?: string; + token?: string; + reason?: string; +} + export interface APIErrorOptions extends ErrorOptions { status?: Status; path?: string; @@ -117,6 +128,20 @@ export interface TokenUsageValidationErrorOptions extends ErrorOptions { constraint?: string; } +export class AuthenticationError extends Error { + constructor( + message: string, + public options?: AuthenticationErrorOptions, + ) { + super(message); + this.name = ErrorType.Authentication; + this.options = options; + } +} +export const isAuthenticationError = (value: unknown): value is AuthenticationError => { + return value instanceof AuthenticationError; +}; + export class APIError extends Error { public status: Status; constructor( @@ -434,6 +459,29 @@ export const isToolHandlingError = (value: unknown): value is ToolHandlingError return value instanceof ToolHandlingError; }; +export interface MCPServerErrorOptions extends ErrorOptions { + serverName: string; + operation: 'tool-run' | 'tool-input' | 'validation' | 'capability-check'; + transport: 'http' | 'stdio'; + config?: Record; + params?: Record; + toolInput?: Record; +} + +export class MCPServerError extends Error { + constructor( + message: string, + public options: MCPServerErrorOptions, + ) { + super(message); + this.name = ErrorType.MCPServer; + } +} + +export const isMCPServerError = (value: unknown): value is MCPServerError => { + return value instanceof MCPServerError; +}; + export interface PersistenceErrorOptions extends ErrorOptions { filePath?: string; operation: 'read' | 'write' | 'append' | 'validate'; diff --git a/api/src/llms/interactions/baseInteraction.ts b/api/src/llms/interactions/baseInteraction.ts index 5c08296e..85f613df 100755 --- a/api/src/llms/interactions/baseInteraction.ts +++ b/api/src/llms/interactions/baseInteraction.ts @@ -38,7 +38,8 @@ import type { 
LLMMessageContentPartToolUseBlock, LLMMessageProviderResponse, } from 'api/llms/llmMessage.ts'; -import { getLLMModelToProvider, type LLMProviderMessageResponseRole } from 'api/types/llms.ts'; +import type { LLMProviderMessageResponseRole } from 'api/types/llms.ts'; +import { getLLMModelToProvider } from 'api/utils/model.ts'; import LLMMessage from 'api/llms/llmMessage.ts'; import type LLMTool from 'api/llms/llmTool.ts'; import type { LLMToolRunResultContent } from 'api/llms/llmTool.ts'; @@ -145,7 +146,7 @@ class LLMInteraction { this._llmProvider = LLMFactory.getProvider( this._interactionCallbacks, this._localMode - //? this._llmModelToProvider[this.projectConfig.defaultModels?.orchestrator ?? 'claude-sonnet-4-20250514'] + //? this._llmModelToProvider[this.projectConfig.defaultModels?.orchestrator ?? 'claude-sonnet-4-5-20250929'] ? this._llmModelToProvider[this._model] : LLMProviderEnum.BB, //globalConfig.api.localMode ? LLMProviderEnum.OPENAI : LLMProviderEnum.BB, @@ -624,19 +625,23 @@ class LLMInteraction { case 'redacted_thinking': return typeof (part as LLMMessageContentPartRedactedThinkingBlock).data === 'string' && (part as LLMMessageContentPartRedactedThinkingBlock).data.trim().length > 0; - case 'tool_use': + case 'tool_use': { const toolUsePart = part as LLMMessageContentPartToolUseBlock; return !!(toolUsePart.id && toolUsePart.name && toolUsePart.input); - case 'tool_result': + } + case 'tool_result': { const toolResultPart = part as LLMMessageContentPartToolResultBlock; return !!(toolResultPart.tool_use_id && (toolResultPart.content || toolResultPart.is_error !== undefined)); - case 'image': + } + case 'image': { const imagePart = part as LLMMessageContentPartImageBlock; return !!(imagePart.source?.data && imagePart.source?.media_type); - case 'audio': + } + case 'audio': { const audioPart = part as LLMMessageContentPartAudioBlock; return !!(audioPart.id); + } default: return false; } @@ -724,24 +729,47 @@ class LLMInteraction { toolRunResultContent: LLMToolRunResultContent, isError: boolean = false, ): string { + // Prepare the content array for the tool result + // Note: tool_result content can only contain text and image blocks + let rawContentArray: LLMMessageContentParts; + if (Array.isArray(toolRunResultContent)) { + rawContentArray = toolRunResultContent; + } else if (typeof toolRunResultContent === 'string') { + rawContentArray = [{ + 'type': 'text', + 'text': toolRunResultContent, + } as LLMMessageContentPartTextBlock]; + } else { + rawContentArray = [toolRunResultContent]; + } + + // Filter to only include text and image blocks (tool_result content requirement) + const contentArray: Array = rawContentArray + .filter((part): part is LLMMessageContentPartTextBlock | LLMMessageContentPartImageBlock => + part.type === 'text' || part.type === 'image' + ); + + // If this is an error, prepend an error message to the content array + // This keeps the error message within the tool_result block instead of creating + // a separate text block, which is required by Anthropic's API (all tool_result + // blocks must come before any text blocks in the message content array) + if (isError) { + const errorMessage = typeof toolRunResultContent === 'string' + ? toolRunResultContent + : JSON.stringify(toolRunResultContent); + contentArray.unshift({ + type: 'text', + text: `The tool run failed: ${errorMessage}`, + } as LLMMessageContentPartTextBlock); + } + const toolResult = { type: 'tool_result', tool_use_id: toolUseId, - content: Array.isArray(toolRunResultContent) ? 
toolRunResultContent : [ - typeof toolRunResultContent !== 'string' ? toolRunResultContent : { - 'type': 'text', - 'text': toolRunResultContent, - } as LLMMessageContentPartTextBlock, - ], + content: contentArray, is_error: isError, } as LLMMessageContentPartToolResultBlock; // logger.debug('LLMInteraction: Adding tool result', toolResult); - const bbResult = isError - ? { - type: 'text', - text: `The tool run failed: ${toolRunResultContent}`, - } as LLMMessageContentPartTextBlock - : null; const lastMessage = this.getLastMessage(); if (lastMessage && lastMessage.role === 'user') { @@ -779,23 +807,26 @@ class LLMInteraction { return lastMessage.id; } else { // Add new tool result to existing user message + // Note: Error messages are now included in the tool_result's content array + // to comply with Anthropic's requirement that all tool_result blocks must + // come before any text blocks in the message content array logger.debug( 'LLMInteraction: Adding new tool result to existing user message', JSON.stringify(toolResult, null, 2), ); lastMessage.content.push(toolResult); - if (bbResult) lastMessage.content.push(bbResult); return lastMessage.id; } } else { // Add a new user message with the tool result + // Note: Error messages are now included in the tool_result's content array + // to comply with Anthropic's requirement that all tool_result blocks must + // come before any text blocks in the message content array logger.debug( 'LLMInteraction: Adding new user message with tool result', JSON.stringify(toolResult, null, 2), ); - const newMessageContent: LLMMessageContentParts = [toolResult]; - if (bbResult) newMessageContent.push(bbResult); - const newMessage = new LLMMessage('user', newMessageContent, this.interactionStats); + const newMessage = new LLMMessage('user', [toolResult], this.interactionStats); this.addMessage(newMessage); return newMessage.id; } diff --git a/api/src/llms/interactions/chatInteraction.ts b/api/src/llms/interactions/chatInteraction.ts index 989d51a8..5346f408 100644 --- a/api/src/llms/interactions/chatInteraction.ts +++ b/api/src/llms/interactions/chatInteraction.ts @@ -5,6 +5,7 @@ import type { InteractionId } from 'shared/types.ts'; import type { AuxiliaryChatContent } from 'api/logEntries/types.ts'; import { DefaultModelsConfigDefaults } from 'shared/types/models.ts'; import type LLMMessage from 'api/llms/llmMessage.ts'; +import type { LLMMessageContentPart, LLMMessageContentParts } from 'api/llms/llmMessage.ts'; //import type { LLMMessageContentPartTextBlock } from 'api/llms/llmMessage.ts'; import type LLMTool from 'api/llms/llmTool.ts'; //import { extractTextFromContent } from 'api/utils/llms.ts'; @@ -29,7 +30,7 @@ class LLMChatInteraction extends LLMInteraction { } public async chat( - prompt: string, + prompt: string | LLMMessageContentPart | LLMMessageContentParts, speakOptions?: LLMSpeakWithOptions | null, ): Promise { if (!speakOptions) { @@ -43,7 +44,17 @@ class LLMChatInteraction extends LLMInteraction { this._statementTurnCount++; //logger.debug(`chat - calling addMessageForUserRole for turn ${this._statementTurnCount}` ); - const messageId = this.addMessageForUserRole({ type: 'text', text: prompt }); + // Handle different prompt types for multi-modal support + let messageContent: LLMMessageContentParts; + if (typeof prompt === 'string') { + messageContent = [{ type: 'text', text: prompt }]; + } else if (Array.isArray(prompt)) { + messageContent = prompt; + } else { + messageContent = [prompt]; + } + + const messageId = 
this.addMessageForUserRole(messageContent); //this.collaborationLogger.logAuxiliaryMessage(messageId, null, null, prompt); @@ -61,8 +72,17 @@ class LLMChatInteraction extends LLMInteraction { //const msg = extractTextFromContent(response.messageResponse.answerContent); //const msg = `${prompt}\n${response.messageResponse.answer}`; + // Convert prompt to string for logging + const promptText = typeof prompt === 'string' + ? prompt + : Array.isArray(prompt) + ? prompt.map((p) => p.type === 'text' ? p.text : `[${p.type}]`).join(' ') + : prompt.type === 'text' + ? prompt.text + : `[${prompt.type}]`; + const auxiliaryContent: AuxiliaryChatContent = { - prompt, + prompt: promptText, message: response.messageResponse.answer, purpose: this.title || '', }; diff --git a/api/src/llms/interactions/conversationInteraction.ts b/api/src/llms/interactions/conversationInteraction.ts index b1511e73..e1422988 100644 --- a/api/src/llms/interactions/conversationInteraction.ts +++ b/api/src/llms/interactions/conversationInteraction.ts @@ -180,13 +180,15 @@ class LLMConversationInteraction extends LLMInteraction { _turnIndex: number, ): Promise { try { - logger.info( - `ConversationInteraction: createResourceContentBlocks - resourceUri: ${resourceUri} [${revisionId}]`, - ); const resourceMetadata = this.getResourceRevisionMetadata( generateResourceRevisionKey(resourceUri, revisionId), ); - const content = await this.readResourceContent(resourceUri, revisionId); + logger.info( + `ConversationInteraction: createResourceContentBlocks - resourceUri: ${resourceUri} [${revisionId}] (${ + resourceMetadata?.mimeType || 'unknown mimeType' + })`, + ); + const content = await this.readResourceContent(resourceUri, revisionId, resourceMetadata); if (!resourceMetadata || !content) { throw new Error(`Resource has not been added to conversation: ${resourceUri}`); } @@ -219,6 +221,19 @@ class LLMConversationInteraction extends LLMInteraction { type: 'text', text: `${BB_RESOURCE_METADATA_DELIMITER}\n${JSON.stringify(metadata, null, 2)}`, }; + //logger.info(`ConversationInteraction: createResourceContentBlocks using metadata - resourceUri: ${resourceUri} [${revisionId}]`, metadataBlock); + + // For PDFs, create base64 content block and metadata block + if (resourceMetadata.mimeType === 'application/pdf') { + //logger.info(`ConversationInteraction: createResourceContentBlocks as PDF - resourceUri: ${resourceUri} [${revisionId}]`); + const pdfData = content as Uint8Array; + const base64Data = encodeBase64(pdfData); + const pdfContentBlock: LLMMessageContentPartTextBlock = { + type: 'text', + text: base64Data, // Send as base64 string to prevent corruption + }; + return [metadataBlock, pdfContentBlock]; + } // For images, create image block and metadata block if (resourceMetadata.contentType === 'image') { @@ -267,18 +282,18 @@ class LLMConversationInteraction extends LLMInteraction { public async readResourceContent( resourceUri: string, revisionId: string, - //_resourceMetadata: ResourceRevisionMetadata, + resourceMetadata: ResourceRevisionMetadata | undefined, ): Promise { try { //logger.info(`ConversationInteraction: Reading resource revision from project: ${resourceUri}`); - const content = await this.getResourceRevision(resourceUri, revisionId); + const content = await this.getResourceRevision(resourceUri, revisionId, resourceMetadata); if (content === null) { logger.info(`ConversationInteraction: Reading contents of Resource ${resourceUri}`); const resource = await this.projectData.getProjectResource(resourceUri); if 
(!resource) { throw new Error(`Resource could not be loaded for: ${resourceUri}`); } - await this.storeResourceRevision(resourceUri, revisionId, resource.content); + await this.storeResourceRevision(resourceUri, revisionId, resource.content, resource.metadata); return resource.content; } logger.info( @@ -307,15 +322,28 @@ class LLMConversationInteraction extends LLMInteraction { } } - async storeResourceRevision(resourceUri: string, revisionId: string, content: string | Uint8Array): Promise { + async storeResourceRevision( + resourceUri: string, + revisionId: string, + content: string | Uint8Array, + resourceMetadata: ResourceRevisionMetadata | undefined, + ): Promise { logger.info(`ConversationInteraction: Storing resource revision: ${resourceUri} Revision: (${revisionId})`); - await this.interactionPersistence.storeResourceRevision(resourceUri, revisionId, content); + await this.interactionPersistence.storeResourceRevision(resourceUri, revisionId, content, resourceMetadata); } - async getResourceRevision(resourceUri: string, revisionId: string): Promise { + async getResourceRevision( + resourceUri: string, + revisionId: string, + resourceMetadata: ResourceRevisionMetadata | undefined, + ): Promise { logger.info(`ConversationInteraction: Getting resource revision: ${resourceUri} Revision: (${revisionId})`); try { - const content = await this.interactionPersistence.getResourceRevision(resourceUri, revisionId); + const content = await this.interactionPersistence.getResourceRevision( + resourceUri, + revisionId, + resourceMetadata, + ); return content; } catch (error) { logger.info( @@ -356,7 +384,7 @@ class LLMConversationInteraction extends LLMInteraction { const resourceMetadata = this.getResourceRevisionMetadata( generateResourceRevisionKey(resourceUri, revisionId), ); - const content = await this.readResourceContent(resourceUri, revisionId); + const content = await this.readResourceContent(resourceUri, revisionId, resourceMetadata); if (!resourceMetadata || !content) { throw new Error(`Resource has not been added to conversation: ${resourceUri}`); } diff --git a/api/src/llms/interactions/interactionManager.ts b/api/src/llms/interactions/interactionManager.ts index 7f91d952..fd5c2af2 100644 --- a/api/src/llms/interactions/interactionManager.ts +++ b/api/src/llms/interactions/interactionManager.ts @@ -7,7 +7,8 @@ import type { InteractionId, InteractionType } from 'shared/types.ts'; import { logger } from 'shared/logger.ts'; import type { LLMCallbacks } from 'api/types.ts'; -class InteractionManager { +export class InteractionManager { + private static instance: InteractionManager; private interactionResults: Map; private interactions: Map; private interactionHierarchy: Map; // child ID to parent ID @@ -18,6 +19,13 @@ class InteractionManager { this.interactionResults = new Map(); } + static getInstance(): InteractionManager { + if (!InteractionManager.instance) { + InteractionManager.instance = new InteractionManager(); + } + return InteractionManager.instance; + } + async createInteraction( collaboration: Collaboration, type: InteractionType, @@ -163,6 +171,5 @@ class InteractionManager { } } -export default InteractionManager; - -export const interactionManager: InteractionManager = new InteractionManager(); +// Export the InteractionManager class so consumers can call InteractionManager.getInstance() +// This avoids circular dependency issues caused by top-level instantiation diff --git a/api/src/llms/llmToolManager.ts b/api/src/llms/llmToolManager.ts index cd848fe1..da77da76 100644 
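The InteractionManager change above replaces a module-level `interactionManager` export with a lazily constructed singleton. A minimal, self-contained sketch of that access pattern follows; only the `getInstance()` shape is taken from the diff, the class and call sites here are illustrative.

```ts
// Minimal sketch of the lazy-singleton pattern adopted for InteractionManager.
// Nothing is constructed at module-load time, so importing this module cannot
// participate in a circular-initialization cycle.
class Registry {
  private static instance: Registry;
  private items = new Map<string, unknown>();

  static getInstance(): Registry {
    if (!Registry.instance) {
      Registry.instance = new Registry();
    }
    return Registry.instance;
  }

  set(key: string, value: unknown): void {
    this.items.set(key, value);
  }

  get(key: string): unknown {
    return this.items.get(key);
  }
}

// Consumers resolve the instance at call time, not at import time:
const registry = Registry.getInstance();
registry.set('example', 42);
```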
--- a/api/src/llms/llmToolManager.ts +++ b/api/src/llms/llmToolManager.ts @@ -7,7 +7,7 @@ import type LLMConversationInteraction from 'api/llms/conversationInteraction.ts import type { LLMAnswerToolUse } from 'api/llms/llmMessage.ts'; import type LLMTool from 'api/llms/llmTool.ts'; -import type LLMToolMCP from './tools/mcp.tool/tool.ts'; +import type LLMToolMCP from 'api/llms/tools/mcp.tool/tool.ts'; import type { LLMToolInputSchema, LLMToolRunBbResponse, @@ -16,14 +16,15 @@ import type { } from 'api/llms/llmTool.ts'; import { createError, ErrorType } from 'api/utils/error.ts'; -import type { LLMToolMCPConfig } from './tools/mcp.tool/types.ts'; +import type { LLMToolMCPConfig } from 'api/llms/tools/mcp.tool/types.ts'; import type { LLMValidationErrorOptions } from 'api/errors/error.ts'; import { logger } from 'shared/logger.ts'; import type { ProjectConfig } from 'shared/config/types.ts'; import { getBbDir, getGlobalConfigDir } from 'shared/dataDir.ts'; import type { MCPManager } from 'api/mcp/mcpManager.ts'; import { getMCPManager } from 'api/mcp/mcpManager.ts'; -import type { SessionManager } from 'api/auth/session.ts'; +import type { UserContext } from 'shared/types/app.ts'; +import { hasExternalToolsAccess } from 'api/utils/featureAccess.ts'; import { CORE_TOOLS } from './tools_manifest.ts'; @@ -69,15 +70,15 @@ class LLMToolManager { private globalConfigDir: string | undefined; public toolSet: LLMToolManagerToolSetType | LLMToolManagerToolSetType[]; private mcpManager!: MCPManager; - private sessionManager: SessionManager; + private userContext: UserContext; constructor( projectConfig: ProjectConfig, - sessionManager: SessionManager, + userContext: UserContext, toolSet: LLMToolManagerToolSetType | LLMToolManagerToolSetType[] = 'core', ) { this.projectConfig = projectConfig; - this.sessionManager = sessionManager; + this.userContext = userContext; this.toolSet = toolSet; } @@ -202,7 +203,7 @@ class LLMToolManager { logger.debug(`LLMToolManager: Loading tools from ${serverIds.length} MCP servers`); // Check if user has access to external MCP tools - const mcpAccessCheck = await this.sessionManager.hasExternalToolsAccess(); + const mcpAccessCheck = await hasExternalToolsAccess(this.userContext); if (!mcpAccessCheck) { logger.info(`LLMToolManager: MCP tools access denied`); diff --git a/api/src/llms/modelRegistryService.ts b/api/src/llms/modelRegistryService.ts index 3987834d..1e24ef93 100644 --- a/api/src/llms/modelRegistryService.ts +++ b/api/src/llms/modelRegistryService.ts @@ -14,6 +14,25 @@ import type { LLMProviderConfig, ProjectConfig } from 'shared/config/types.ts'; import { getConfigManager } from 'shared/config/configManager.ts'; //import type { PartialTokenPricing } from 'shared/types/models.ts'; +// Generic types for model selection (used across BB, not just MCP) +export interface ModelSelectionPreferences { + /** Cost priority (0.0 = don't care, 1.0 = minimize cost) */ + costPriority?: number; + /** Speed priority (0.0 = don't care, 1.0 = maximize speed) */ + speedPriority?: number; + /** Intelligence priority (0.0 = don't care, 1.0 = maximize capabilities) */ + intelligencePriority?: number; + /** Model hints for preferred models or providers */ + hints?: ModelSelectionHint[]; +} + +export interface ModelSelectionHint { + /** Hint name (model ID, provider name, or capability) */ + name: string; + /** Optional description of why this hint is preferred */ + description?: string; +} + // Import the built-in capabilities data import builtinCapabilities from 
'../data/modelCapabilities.json' with { type: 'json' }; @@ -81,6 +100,7 @@ export class ModelRegistryService { private providerModels: Map = new Map(); private initialized = false; private llmProviders!: Partial>; + private preferredProviders: string[] = ['anthropic']; /** * Private constructor for singleton pattern @@ -98,6 +118,22 @@ export class ModelRegistryService { return ModelRegistryService.instance; } + /** + * Set preferred providers for model selection + * @param providers Array of provider names in order of preference + */ + public setPreferredProviders(providers: string[]): void { + this.preferredProviders = [...providers]; + logger.info('ModelRegistryService: Updated preferred providers', { providers }); + } + + /** + * Get current preferred providers + */ + public getPreferredProviders(): string[] { + return [...this.preferredProviders]; + } + /** * Initialize the service */ @@ -130,6 +166,7 @@ export class ModelRegistryService { /** * Load static models from built-in capabilities JSON */ + // deno-lint-ignore require-await private async loadStaticModels(): Promise { try { // Destructure to separate metadata from provider data @@ -450,6 +487,375 @@ export class ModelRegistryService { return mapping; } + // ============================================================================ + // SMART MODEL SELECTION METHODS + // ============================================================================ + + /** + * Select the best model based on preferences using intelligent scoring + * This is the main entry point for smart model selection across BB + */ + public selectModelByPreferences(preferences?: ModelSelectionPreferences): string { + //logger.info('ModelRegistryService: selecting model based on preferences', preferences); + if (!this.initialized) { + logger.warn('ModelRegistryService: Service not initialized, returning smart default'); + return this.getSmartDefaultModel(); + } + + // If no preferences provided, use smart default logic + if (!preferences) { + //logger.info('ModelRegistryService: No preferences provided, using smart default'); + return this.getSmartDefaultModel(); + } + + const allModels = this.getAllModels(); + if (allModels.length === 0) { + logger.warn('ModelRegistryService: No models available, returning fallback default'); + return this.getFallbackDefaultModel(); + } + + // Process hints first if provided + if (preferences.hints?.length) { + for (const hint of preferences.hints) { + // Skip hints with invalid names + if (!hint.name || typeof hint.name !== 'string' || hint.name.trim() === '') { + logger.warn('ModelRegistryService: Skipping invalid hint with empty/undefined name:', hint); + continue; + } + + //logger.info('ModelRegistryService: choosing model from hints: ', hint.name); + const mappedModel = this.mapHintToModel(allModels, hint.name); + if (mappedModel) { + //logger.info(`ModelRegistryService: Mapped hint '${hint.name}' to model '${mappedModel}'`); + return mappedModel; + } + } + //logger.info('ModelRegistryService: No suitable model found for hints, using capability priorities'); + } + + // Use capability-based selection + const selectedModel = this.selectModelByCapabilities(allModels, { + cost: preferences.costPriority || 0.5, + speed: preferences.speedPriority || 0.5, + intelligence: preferences.intelligencePriority || 0.5, + }); + + logger.info('ModelRegistryService: Selected model using capability scoring', { + selectedModel, + preferences: { + cost: preferences.costPriority, + speed: preferences.speedPriority, + intelligence: 
preferences.intelligencePriority, + }, + }); + + return selectedModel; + } + + /** + * Map model hints to available models using comprehensive matching logic + */ + public mapHintToModel(allModels: ModelInfo[], hint: string): string | null { + // Validate input parameters + if (!hint || typeof hint !== 'string' || hint.trim() === '') { + logger.warn('ModelRegistryService: Invalid hint provided to mapHintToModel:', hint); + return null; + } + + if (!allModels || allModels.length === 0) { + logger.warn('ModelRegistryService: No models provided to mapHintToModel'); + return null; + } + + const normalizedHint = hint.trim(); + + // 1. Check for case-insensitive exact model ID match + const caseInsensitiveMatch = allModels.find((m) => m.id.toLowerCase() === normalizedHint.toLowerCase()); + if (caseInsensitiveMatch) { + return caseInsensitiveMatch.id; + } + + // 2. Create dynamic model mappings based on available models + const modelMappings: Record = { + // Claude variants + 'claude-3-sonnet': allModels + .filter((m) => m.id.includes('claude-3') && m.id.includes('sonnet')) + .map((m) => m.id), + 'claude-sonnet': allModels + .filter((m) => m.id.includes('claude') && m.id.includes('sonnet')) + .map((m) => m.id), + 'claude-3-haiku': allModels + .filter((m) => m.id.includes('claude-3') && m.id.includes('haiku')) + .map((m) => m.id), + 'claude-haiku': allModels.filter((m) => m.id.includes('claude') && m.id.includes('haiku')).map((m) => m.id), + 'claude-3-opus': allModels + .filter((m) => m.id.includes('claude-3') && m.id.includes('opus')) + .map((m) => m.id), + 'claude-opus': allModels.filter((m) => m.id.includes('claude') && m.id.includes('opus')).map((m) => m.id), + 'claude': allModels.filter((m) => m.id.includes('claude')).map((m) => m.id), + + // GPT variants + 'gpt-4o': allModels.filter((m) => m.id.includes('gpt-4o')).map((m) => m.id), + 'gpt-4': allModels.filter((m) => m.id.includes('gpt-4')).map((m) => m.id), + 'gpt': allModels.filter((m) => m.id.includes('gpt')).map((m) => m.id), + 'openai': allModels.filter((m) => m.provider === 'openai').map((m) => m.id), + + // Gemini variants + 'gemini-pro': allModels + .filter((m) => m.id.includes('gemini') && m.id.includes('pro')) + .map((m) => m.id), + 'gemini': allModels.filter((m) => m.id.includes('gemini')).map((m) => m.id), + 'google': allModels.filter((m) => m.provider === 'google').map((m) => m.id), + + // Ollama + 'ollama': allModels.filter((m) => m.provider === 'ollama').map((m) => m.id), + }; + + // 3. Check exact pattern matches + const exactMatch = modelMappings[normalizedHint.toLowerCase()]; + if (exactMatch && exactMatch.length > 0) { + // Apply provider preference to pattern matches + const preferredMatch = this.applyProviderPreference( + exactMatch.map((id) => allModels.find((m) => m.id === id)!).filter(Boolean), + ); + return preferredMatch ? preferredMatch.id : exactMatch[0]; + } + + // 4. Check for partial pattern matches + for (const [pattern, models] of Object.entries(modelMappings)) { + if (normalizedHint.toLowerCase().includes(pattern.toLowerCase()) && models.length > 0) { + // Apply provider preference to pattern matches + const preferredMatch = this.applyProviderPreference( + models.map((id) => allModels.find((m) => m.id === id)!).filter(Boolean), + ); + return preferredMatch ? preferredMatch.id : models[0]; + } + } + + // 5. 
Check if the hint is contained in any model ID + const partialMatch = allModels.find((m) => m.id.toLowerCase().includes(normalizedHint.toLowerCase())); + if (partialMatch) { + return partialMatch.id; + } + + return null; + } + + /** + * Apply provider preference to a list of models + * Returns the first model from the most preferred provider, or null if no models match preferences + */ + private applyProviderPreference(models: ModelInfo[]): ModelInfo | null { + if (models.length === 0) return null; + if (models.length === 1) return models[0]; + + // Try to find models from preferred providers in order + for (const preferredProvider of this.preferredProviders) { + const matchingModels = models.filter((m) => m.provider === preferredProvider); + if (matchingModels.length > 0) { + return matchingModels[0]; // Return first match from this preferred provider + } + } + + // If no preferred provider matches, return the first model + return models[0]; + } + + /** + * Select model based on capability priorities using intelligent scoring + */ + public selectModelByCapabilities( + allModels: ModelInfo[], + priorities: { + cost: number; + speed: number; + intelligence: number; + }, + ): string { + if (allModels.length === 0) { + return this.getFallbackDefaultModel(); + } + + // Score each model based on priorities + let bestModel = allModels[0]; + let bestScore = -1; + + for (const model of allModels) { + const capabilities = model.capabilities; + + // Calculate scores for each priority (0-1 range) + const costScore = this.calculateCostScore(capabilities); + const speedScore = this.calculateSpeedScore(capabilities); + const intelligenceScore = this.calculateIntelligenceScore(capabilities); + + // Weighted combination of scores + let totalScore = costScore * priorities.cost + speedScore * priorities.speed + + intelligenceScore * priorities.intelligence; + + // Apply provider preference bonus (up to 0.1 points) + const providerBonus = this.calculateProviderPreferenceScore(model.provider); + totalScore += providerBonus; + + if (totalScore > bestScore) { + bestScore = totalScore; + bestModel = model; + } + } + + logger.debug('ModelRegistryService: Selected model using capability scoring', { + selectedModel: bestModel.id, + bestScore, + priorities, + preferredProviders: this.preferredProviders, + }); + + return bestModel.id; + } + + /** + * Calculate cost score (lower cost = higher score) + */ + public calculateCostScore(capabilities: ModelCapabilities): number { + const inputPrice = capabilities.token_pricing?.input || 0; + const outputPrice = capabilities.token_pricing?.output || 0; + const avgPrice = (inputPrice + outputPrice) / 2; + + // Normalize price to 0-1 score (assuming max price of $0.10 per 1K tokens) + const maxPrice = 0.1; + return Math.max(0, 1 - avgPrice / maxPrice); + } + + /** + * Calculate speed score based on model response speed rating + */ + public calculateSpeedScore(capabilities: ModelCapabilities): number { + const speedRating = capabilities.responseSpeed || 'medium'; + switch (speedRating) { + case 'very_fast': + return 1.0; + case 'fast': + return 0.8; + case 'medium': + return 0.6; + case 'slow': + return 0.4; + case 'very_slow': + return 0.2; + default: + return 0.6; + } + } + + /** + * Calculate intelligence score based on model capabilities and features + */ + public calculateIntelligenceScore(capabilities: ModelCapabilities): number { + let score = 0.5; // Base score + + // Bonus for advanced features + if (capabilities.supportedFeatures?.functionCalling) score += 0.1; + if 
(capabilities.supportedFeatures?.json) score += 0.1; + if (capabilities.supportedFeatures?.vision) score += 0.1; + if (capabilities.supportedFeatures?.extendedThinking) score += 0.1; + if (capabilities.supportedFeatures?.promptCaching) score += 0.05; + + // Bonus for larger context windows (normalized to max 200k tokens) + const contextBonus = Math.min(0.15, (capabilities.contextWindow / 200000) * 0.15); + score += contextBonus; + + return Math.min(1.0, score); + } + + /** + * Calculate provider preference score bonus + * Returns a bonus score (0-0.1) based on provider preference ranking + */ + public calculateProviderPreferenceScore(provider: LLMProvider): number { + const providerIndex = this.preferredProviders.indexOf(provider); + if (providerIndex === -1) { + // Provider not in preferences, no bonus + return 0; + } + + // Higher preference = higher bonus (0.1 for first, decreasing by 0.02 for each subsequent) + const maxBonus = 0.1; + const decreasePerRank = 0.02; + return Math.max(0, maxBonus - (providerIndex * decreasePerRank)); + } + + /** + * Get smart default model using intelligent logic instead of hard-coded values + * Selects a balanced model that works well for most use cases + */ + public getSmartDefaultModel(): string { + if (!this.initialized) { + return this.getFallbackDefaultModel(); + } + + const allModels = this.getAllModels(); + if (allModels.length === 0) { + return this.getFallbackDefaultModel(); + } + + // First try to get a model from preferred providers + for (const preferredProvider of this.preferredProviders) { + const providerModels = allModels.filter((m) => m.provider === preferredProvider); + if (providerModels.length > 0) { + // Use balanced priorities for default selection within preferred provider + const balancedPreferences: ModelSelectionPreferences = { + costPriority: 0.3, // Some cost consideration + speedPriority: 0.3, // Some speed consideration + intelligencePriority: 0.7, // Prefer intelligence for general use + }; + + const selectedModel = this.selectModelByCapabilities(providerModels, { + cost: balancedPreferences.costPriority!, + speed: balancedPreferences.speedPriority!, + intelligence: balancedPreferences.intelligencePriority!, + }); + + logger.info('ModelRegistryService: Selected smart default model from preferred provider', { + selectedModel, + preferredProvider, + preferences: balancedPreferences, + }); + + return selectedModel; + } + } + + // Fallback to all models if no preferred provider has models + const balancedPreferences: ModelSelectionPreferences = { + costPriority: 0.3, // Some cost consideration + speedPriority: 0.3, // Some speed consideration + intelligencePriority: 0.7, // Prefer intelligence for general use + }; + + const selectedModel = this.selectModelByCapabilities(allModels, { + cost: balancedPreferences.costPriority!, + speed: balancedPreferences.speedPriority!, + intelligence: balancedPreferences.intelligencePriority!, + }); + + logger.info('ModelRegistryService: Selected smart default model (fallback to all models)', { + selectedModel, + preferences: balancedPreferences, + preferredProviders: this.preferredProviders, + }); + + return selectedModel; + } + + /** + * Get fallback default model when smart logic fails + * This should only be used when ModelRegistryService is not properly initialized + */ + private getFallbackDefaultModel(): string { + // Conservative fallback - should rarely be used + logger.warn('ModelRegistryService: Using hard-coded fallback default model'); + return 'claude-sonnet-4-5-20250929'; + } + 
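+    // Illustrative usage (a sketch only, assuming `registry` is an initialized
+    // ModelRegistryService instance): hints are resolved first via mapHintToModel();
+    // the weighted cost/speed/intelligence scoring, plus the provider-preference
+    // bonus of up to 0.1, only decides the result when no hint maps to an available model.
+    //
+    //   registry.setPreferredProviders(['anthropic', 'openai']);
+    //   const modelId = registry.selectModelByPreferences({
+    //     hints: [{ name: 'claude-sonnet' }],
+    //     costPriority: 0.2,
+    //     speedPriority: 0.4,
+    //     intelligencePriority: 0.9,
+    //   });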
/** * Refresh dynamic models (useful for manual updates) */ @@ -472,6 +878,10 @@ export class ModelRegistryService { return models.length > 0 ? models[0].id : undefined; } + // ============================================================================ + // PARAMETER RESOLUTION METHODS (existing) + // ============================================================================ + /** * Resolves a parameter value based on priority */ diff --git a/api/src/llms/providers/anthropicLLM.ts b/api/src/llms/providers/anthropicLLM.ts index 3cd712cf..5e03f999 100755 --- a/api/src/llms/providers/anthropicLLM.ts +++ b/api/src/llms/providers/anthropicLLM.ts @@ -1,5 +1,6 @@ import Anthropic from 'anthropic'; import type { ClientOptions } from 'anthropic'; +import { decodeBase64 } from '@std/encoding'; import { AnthropicModel, LLMCallbackType, LLMProvider } from 'api/types.ts'; import { BB_RESOURCE_METADATA_DELIMITER } from 'api/llms/conversationInteraction.ts'; @@ -245,7 +246,7 @@ class AnthropicLLM extends LLM { const lastThreeUserMessages = userMessages.slice(-3); const lastThreeIndices = new Set(lastThreeUserMessages.map((m) => m.index)); - return messages.map((m, index) => { + const messagesTransformed = messages.map((m, index) => { const prevContent: AnthropicBlockParamOrArray = m.content as AnthropicBlockParamOrArray; let content: AnthropicBlockParamOrArray; @@ -283,6 +284,11 @@ class AnthropicLLM extends LLM { content: content, } as Anthropic.Messages.MessageParam; }); + + // Extract PDF content from tool_result blocks and convert to data blocks + const messagesWithExtractedPdfs = this.extractPdfContentFromMessages(messagesTransformed); + + return messagesWithExtractedPdfs; } private asProviderToolType(tools: LLMTool[]): Anthropic.Messages.Tool[] { @@ -294,6 +300,181 @@ class AnthropicLLM extends LLM { } as Anthropic.Tool)); } + /** + * Extract PDF content from tool_result blocks and convert to 'document' type blocks + * Anthropic tool_result types don't support PDF, but 'document' types do + */ + private extractPdfContentFromMessages( + messages: Array, + ): Array { + return messages.map((message, msgIndex) => { + if (!Array.isArray(message.content)) { + return message; + } + + const transformedContent: Array = []; + const extractedPdfBlocks: Array = []; + + message.content.forEach((block, blockIndex) => { + if (block.type === 'tool_result' && Array.isArray(block.content)) { + // Process tool_result blocks that might contain PDF + const cleanedToolContent: Array< + Anthropic.Messages.TextBlockParam | Anthropic.Messages.ImageBlockParam + > = []; + let foundPdf = false; + + // Handle paired blocks: metadata block + content block + for (let toolIndex = 0; toolIndex < block.content.length; toolIndex++) { + const toolContentBlock = block.content[toolIndex]; + if (toolContentBlock.type === 'text') { + const pdfCheck = this.isPdfContentType(toolContentBlock.text); + // If this block has PDF metadata, check next block for PDF content + if (pdfCheck.isPdf && pdfCheck.metadata) { + logger.info( + `LlmProvider[${this.llmProviderName}]: Found PDF metadata! 
File: ${pdfCheck.metadata.uri}, Size: ${pdfCheck.metadata.size} bytes`, + ); + + // Check if next block contains PDF content + const nextBlock = block.content[toolIndex + 1]; + if (nextBlock && nextBlock.type === 'text' && this.isPdfContent(nextBlock.text)) { + // Create cleaned text with metadata + note + const note = + `Note: PDF content has been extracted and moved to a separate document block for proper handling by the Anthropic API.`; + const cleanedText = toolContentBlock.text + '\n' + note; + + // Add metadata context as text block + extractedPdfBlocks.push({ + type: 'text', + text: `PDF Document: ${ + pdfCheck.metadata.uri?.split('/').pop() || 'document.pdf' + } (${pdfCheck.metadata.size} bytes, modified: ${pdfCheck.metadata.last_modified})`, + }); + + // Create a 'document' type block for the PDF + extractedPdfBlocks.push({ + type: 'document', + source: { + type: 'base64', + media_type: 'application/pdf', + data: nextBlock.text, + }, + } as any); + + // Add metadata and note to the tool_result indicating PDF was moved + cleanedToolContent.push({ + type: 'text', + text: cleanedText, + }); + + // Skip the next block since we've processed it as PDF content + toolIndex++; // Skip the content block + foundPdf = true; + } else { + // No paired PDF content found, keep metadata as-is + logger.info( + `LlmProvider[${this.llmProviderName}]: PDF metadata found but no paired content block`, + ); + cleanedToolContent.push(toolContentBlock as Anthropic.Messages.TextBlockParam); + } + } else { + // Keep non-PDF text content as-is + cleanedToolContent.push(toolContentBlock as Anthropic.Messages.TextBlockParam); + } + } else if (toolContentBlock.type === 'image') { + // Keep image content as-is + cleanedToolContent.push(toolContentBlock as Anthropic.Messages.ImageBlockParam); + } + // Skip other content types that aren't supported in tool_result content + } + + // Add the cleaned tool_result block + transformedContent.push({ + ...block, + content: cleanedToolContent, + }); + } else { + // Keep non-tool_result blocks as-is + transformedContent.push(block); + } + }); + + // Add extracted PDF blocks after the transformed content + if (extractedPdfBlocks.length > 0) { + logger.info( + `LlmProvider[${this.llmProviderName}]: Adding ${extractedPdfBlocks.length} extracted PDF blocks to message ${ + msgIndex + 1 + }`, + ); + } + transformedContent.push(...extractedPdfBlocks); + + return { + ...message, + content: transformedContent, + }; + }); + } + + /** + * Check if text content contains metadata with application/pdf content-type + */ + private isPdfContentType(text: string): { isPdf: boolean; metadata?: any } { + // Look for bb-resource-metadata blocks using the correct delimiter + // Matches JSON block after metadata marker: captures from opening { to closing }, + // handling one level of nested objects with [^{}]|{[^}]*} pattern + const metadataMatch = text.match( + new RegExp(`${BB_RESOURCE_METADATA_DELIMITER}\\s*\\n((?:{(?:[^{}]|{[^}]*})*})+)`), + ); + if (!metadataMatch) { + return { isPdf: false }; + } + + try { + const metadata = JSON.parse(metadataMatch[1]); + if (metadata.mime_type === 'application/pdf') { + return { isPdf: true, metadata }; + } + } catch (error) { + logger.error(`LlmProvider[${this.llmProviderName}]: Failed to parse metadata:`, error); + } + + return { isPdf: false }; + } + + /** + * Check if text content contains PDF data + */ + private isPdfContent(pdfContent: string): boolean { + // CHECK: Is content base64 encoded? 
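+        // Illustrative note (a sketch of the heuristic below): a base64-encoded PDF always
+        // begins with "JVBERi", which decodes to the 0x25 0x50 0x44 0x46 ("%PDF") signature
+        // verified further down. The regex only sanity-checks that the first 100 characters
+        // look like base64; the decoded signature comparison is what confirms PDF content.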
+ const isBase64 = /^[A-Za-z0-9+/]*={0,2}$/.test(pdfContent.slice(0, 100)); + + // Verify PDF signature + if (isBase64) { + // Verify PDF signature by checking the first 4 bytes match '%PDF' (0x25504446) + // More reliable than string conversion since we're working with binary data + // and handles potential encoding issues with base64 decoded content + const pdfContentDecoded = decodeBase64(pdfContent); + const pdfSignature = new Uint8Array([0x25, 0x50, 0x44, 0x46]); // '%PDF' + + // Using every() to iterate through our 4-byte signature array and compare each byte + // against the corresponding position in the decoded content - this only checks the + // start of the file (bytes 0-3), not scanning the entire array + const isPdfSignature = pdfContentDecoded.length >= 4 && + pdfSignature.every((byte, index) => pdfContentDecoded[index] === byte); + + if (isPdfSignature) { + logger.info(`LlmProvider[${this.llmProviderName}]: PDF signature verified`); + return true; + } else { + logger.warn(`LlmProvider[${this.llmProviderName}]: No PDF signature found in content block`); + return false; + } + } else { + logger.warn(`LlmProvider[${this.llmProviderName}]: PDF is not base64 encoded`); + return false; + } + } + //// deno-lint-ignore require-await override async asProviderMessageRequest( messageRequest: LLMProviderMessageRequest, @@ -373,7 +554,12 @@ class AnthropicLLM extends LLM { model, max_tokens: maxTokens, temperature, - betas: ['output-128k-2025-02-19', 'token-efficient-tools-2025-02-19', 'interleaved-thinking-2025-05-14'], + betas: [ + 'context-1m-2025-08-07', + 'output-128k-2025-02-19', + 'token-efficient-tools-2025-02-19', + 'interleaved-thinking-2025-05-14', + ], //stream: false, // Add extended thinking support if enabled in the request @@ -431,7 +617,7 @@ class AnthropicLLM extends LLM { { headers: { 'anthropic-beta': - 'output-128k-2025-02-19,token-efficient-tools-2025-02-19,interleaved-thinking-2025-05-14', + 'context-1m-2025-08-07,output-128k-2025-02-19,token-efficient-tools-2025-02-19,interleaved-thinking-2025-05-14', }, }, ).withResponse(); diff --git a/api/src/llms/providers/bbLLM.ts b/api/src/llms/providers/bbLLM.ts index 08ba2193..931bb901 100644 --- a/api/src/llms/providers/bbLLM.ts +++ b/api/src/llms/providers/bbLLM.ts @@ -1,4 +1,4 @@ -import type { SupabaseClient } from '@supabase/supabase-js'; +// import type { SupabaseClient } from '@supabase/supabase-js'; //import { FunctionsHttpError, FunctionsRelayError, FunctionsFetchError } from "@supabase/supabase-js"; import { AnthropicModel, LLMCallbackType, LLMProvider } from 'api/types.ts'; @@ -13,6 +13,7 @@ import type { } from 'api/llms/llmMessage.ts'; import type LLMTool from 'api/llms/llmTool.ts'; import { createError } from 'api/utils/error.ts'; +import { errorMessage, isError } from 'shared/error.ts'; import { ErrorType, isLLMError, type LLMErrorOptions } from 'api/errors/error.ts'; import { logger } from 'shared/logger.ts'; import type { @@ -37,15 +38,15 @@ type LLMMessageContentPartOrString = // | LLMMessageContentPart // >; class BbLLM extends LLM { - private supabaseClient!: SupabaseClient; + //private supabaseClient!: SupabaseClient; constructor(callbacks: LLMCallbacks) { super(callbacks); this.llmProviderName = LLMProvider.BB; - const projectEditor = this.invokeSync(LLMCallbackType.PROJECT_EDITOR); - this.supabaseClient = projectEditor.sessionManager.supabaseClient; + //const projectEditor = this.invokeSync(LLMCallbackType.PROJECT_EDITOR); + //this.supabaseClient = projectEditor.userContext.userAuthSession.getClient(); 
} // Helper function to check for file metadata blocks @@ -280,10 +281,11 @@ class BbLLM extends LLM { /** * Determines whether to use direct fetch or Supabase edge function */ - private shouldUseDirectFetch(): boolean { - // Check if useFallback is configured for Supabase - return Boolean(!this.projectConfig.api?.llmProviders?.beyondbetter?.config?.useFallback); - } + // supabase function is retired + // private shouldUseDirectFetch(): boolean { + // // Check if useFallback is configured for Supabase + // return Boolean(!this.projectConfig.api?.llmProviders?.beyondbetter?.config?.useFallback); + // } /** * Get the base URL for direct fetch calls @@ -298,16 +300,18 @@ class BbLLM extends LLM { */ private async makeDirectFetchCall( providerMessageRequest: LLMProviderMessageRequest, + // deno-lint-ignore no-explicit-any ): Promise<{ data: BBLLMResponse | null; error: any }> { + const FETCH_TIMEOUT_MINS = 15; try { const baseUrl = this.getBaseUrl(); // Get session manager to retrieve auth headers const projectEditor = this.invokeSync(LLMCallbackType.PROJECT_EDITOR); - const sessionManager = projectEditor.sessionManager; + const userAuthSession = projectEditor.userContext.userAuthSession; // Get current session for authentication - const session = await sessionManager.getSession(); + const session = await userAuthSession.getSession(); //logger.info(`BbLLM:provider[${this.llmProviderName}]: session`, session); // Prepare headers - same as what Supabase edge functions receive @@ -324,6 +328,7 @@ class BbLLM extends LLM { method: 'POST', headers, body: JSON.stringify(providerMessageRequest), + signal: AbortSignal.timeout(FETCH_TIMEOUT_MINS * 60 * 1000), }); if (!response.ok) { @@ -350,12 +355,19 @@ class BbLLM extends LLM { const data = await response.json() as BBLLMResponse; return { data, error: null }; } catch (error) { + let errorMsg = errorMessage(error); + if (isError(error) && error.name === 'TimeoutError') { + errorMsg = `Timeout: It took more than ${FETCH_TIMEOUT_MINS} mins to get the result: ${errorMsg}`; + } else if (isError(error) && error.name === 'AbortError') { + errorMsg = `Fetch aborted by explicit action: ${errorMsg}`; + } + logger.error(`BbLLM:provider[${this.llmProviderName}]: fetch failed: ${errorMsg}`); return { data: null, error: { - message: (error as Error).message, + message: errorMsg, context: { - json: () => Promise.resolve({ message: (error as Error).message }), + json: () => Promise.resolve({ message: errorMsg }), }, }, }; @@ -380,24 +392,29 @@ class BbLLM extends LLM { interaction, ); + logger.info(`BbLLM:provider[${this.llmProviderName}]: Using direct fetch to ${this.getBaseUrl()}`); + const result = await this.makeDirectFetchCall(providerMessageRequest); + const data: BBLLMResponse | null = result.data; + const error = result.error; + // Choose between direct fetch and Supabase edge function - let data: BBLLMResponse | null; - let error: any; - - if (this.shouldUseDirectFetch()) { - logger.info(`BbLLM:provider[${this.llmProviderName}]: Using direct fetch to ${this.getBaseUrl()}`); - const result = await this.makeDirectFetchCall(providerMessageRequest); - data = result.data; - error = result.error; - } else { - logger.info(`BbLLM:provider[${this.llmProviderName}]: Using Supabase edge function`); - //const { data, error } : {data:BBLLMResponse ; error: FunctionsHttpError, FunctionsRelayError, FunctionsFetchError} = await this.supabaseClient.functions.invoke('llm-proxy', { - const result = await this.supabaseClient.functions.invoke('llm-proxy', { - body: 
providerMessageRequest, - }); - data = result.data; - error = result.error; - } + // let data: BBLLMResponse | null; + // let error: any; + // + // if (this.shouldUseDirectFetch()) { + // logger.info(`BbLLM:provider[${this.llmProviderName}]: Using direct fetch to ${this.getBaseUrl()}`); + // const result = await this.makeDirectFetchCall(providerMessageRequest); + // data = result.data; + // error = result.error; + // } else { + // logger.info(`BbLLM:provider[${this.llmProviderName}]: Using Supabase edge function`); + // //const { data, error } : {data:BBLLMResponse ; error: FunctionsHttpError, FunctionsRelayError, FunctionsFetchError} = await this.supabaseClient.functions.invoke('llm-proxy', { + // const result = await this.supabaseClient.functions.invoke('llm-proxy', { + // body: providerMessageRequest, + // }); + // data = result.data; + // error = result.error; + // } //logger.info(`BbLLM:provider[${this.llmProviderName}]: llms-bb-data`, data); //logger.info(`BbLLM:provider[${this.llmProviderName}]: llms-bb-error`, error); diff --git a/api/src/llms/providers/openAILLM.ts b/api/src/llms/providers/openAILLM.ts index 33dd153c..19055d2a 100644 --- a/api/src/llms/providers/openAILLM.ts +++ b/api/src/llms/providers/openAILLM.ts @@ -93,16 +93,53 @@ class OpenAILLM extends OpenAICompatLLM { ): void {} /** - * Checks if the model is an OpenAI o3 model that requires max_completion_tokens - * instead of max_tokens parameter + * Model parameter configurations for different OpenAI model families. + * Uses regex patterns to determine which token parameter format to use. */ - private isO3Model(model: string): boolean { - return model.toLowerCase().includes('o3'); + private static readonly MODEL_PARAMETER_CONFIGS = { + // Newer models that use max_completion_tokens + max_completion_tokens: [ + /^gpt-o3/i, // o3 models + /^gpt-5/i, // GPT-5 models + /^o1/i, // o1 models (reasoning models) + ], + // Traditional models that use max_tokens (fallback) + max_tokens: [ + /^gpt-4(?!.*o3)/i, // GPT-4 models (excluding o3 variants) + /^gpt-3\.5/i, // GPT-3.5 models + ], + } as const; + + /** + * Determines which token parameter name to use based on the model. + * Uses pattern matching to categorize models into families/generations. 
+ * + * @param model - The model name to check + * @returns The appropriate token parameter name + */ + private getTokenParameterName(model: string): 'max_tokens' | 'max_completion_tokens' { + // Check for max_completion_tokens patterns first (newer models) + const usesMaxCompletionTokens = OpenAILLM.MODEL_PARAMETER_CONFIGS.max_completion_tokens + .some((pattern) => pattern.test(model)); + + if (usesMaxCompletionTokens) { + return 'max_completion_tokens'; + } + + // Default to max_tokens for older/unrecognized models + return 'max_tokens'; + } + + /** + * Checks if the model requires max_completion_tokens instead of max_tokens + */ + private usesMaxCompletionTokens(model: string): boolean { + return this.getTokenParameterName(model) === 'max_completion_tokens'; } /** * Override the base class method to handle OpenAI-specific parameter mapping - * for o3 models that require max_completion_tokens instead of max_tokens + * for models that require max_completion_tokens instead of max_tokens */ override async asProviderMessageRequest( messageRequest: LLMProviderMessageRequest, @@ -111,8 +148,8 @@ class OpenAILLM extends OpenAICompatLLM { // Get the base request from the parent class const baseRequest = await super.asProviderMessageRequest(messageRequest, interaction); - // For o3 models, we need to use max_completion_tokens instead of max_tokens - if (this.isO3Model(baseRequest.model)) { + // For models that use max_completion_tokens, transform the parameter + if (this.usesMaxCompletionTokens(baseRequest.model)) { const { max_tokens, ...requestWithoutMaxTokens } = baseRequest; return { ...requestWithoutMaxTokens, @@ -121,7 +158,7 @@ class OpenAILLM extends OpenAICompatLLM { }; } - // For other OpenAI models, use the original request + // For models that use max_tokens, use the original request return baseRequest; } } diff --git a/api/src/llms/tools/applyPatch.tool/types.ts b/api/src/llms/tools/applyPatch.tool/types.ts index fd0e78c0..b5bdec38 100644 --- a/api/src/llms/tools/applyPatch.tool/types.ts +++ b/api/src/llms/tools/applyPatch.tool/types.ts @@ -21,6 +21,6 @@ export interface LLMToolApplyPatchResponseData { } export interface LLMToolApplyPatchResult { - toolResult: LLMToolRunResultContent; + toolResults: LLMToolRunResultContent; bbResponse: LLMToolApplyPatchResponseData; } diff --git a/api/src/llms/tools/blockEdit.tool/formatter.browser.tsx b/api/src/llms/tools/blockEdit.tool/formatter.browser.tsx index c2c22257..66a81d01 100644 --- a/api/src/llms/tools/blockEdit.tool/formatter.browser.tsx +++ b/api/src/llms/tools/blockEdit.tool/formatter.browser.tsx @@ -81,8 +81,8 @@ export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLog export const formatLogEntryToolResult = ( resultContent: CollaborationLogEntryContentToolResult, ): LLMToolLogEntryFormattedResult => { - const { toolResult, bbResponse } = resultContent as unknown as LLMToolBlockEditResult; - const results = getContentArrayFromToolResult(toolResult); + const { toolResults, bbResponse } = resultContent as unknown as LLMToolBlockEditResult; + const results = getContentArrayFromToolResult(toolResults); // Check if operation was successful const isSuccess = !bbResponse.toLowerCase().includes('error') && diff --git a/api/src/llms/tools/blockEdit.tool/formatter.console.ts b/api/src/llms/tools/blockEdit.tool/formatter.console.ts index e4eb952e..79d5badc 100644 --- a/api/src/llms/tools/blockEdit.tool/formatter.console.ts +++ b/api/src/llms/tools/blockEdit.tool/formatter.console.ts @@ -87,8 +87,8 @@ export const 
formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLog export const formatLogEntryToolResult = ( resultContent: CollaborationLogEntryContentToolResult, ): LLMToolLogEntryFormattedResult => { - const { toolResult, bbResponse } = resultContent as LLMToolBlockEditResult; - const results = getContentArrayFromToolResult(toolResult); + const { toolResults, bbResponse } = resultContent as LLMToolBlockEditResult; + const results = getContentArrayFromToolResult(toolResults); // Check if operation was successful const isSuccess = !bbResponse.toLowerCase().includes('error') && !bbResponse.toLowerCase().includes('failed'); diff --git a/api/src/llms/tools/blockEdit.tool/types.ts b/api/src/llms/tools/blockEdit.tool/types.ts index 7e867154..98b25324 100644 --- a/api/src/llms/tools/blockEdit.tool/types.ts +++ b/api/src/llms/tools/blockEdit.tool/types.ts @@ -51,7 +51,7 @@ export interface LLMToolBlockEditResponseData { * Result of a block_edit tool operation. */ export interface LLMToolBlockEditResult { - toolResult: LLMToolRunResultContent; + toolResults: LLMToolRunResultContent; bbResponse: string; //bbResponse: LLMToolBlockEditResponseData; } diff --git a/api/src/llms/tools/delegateTasks.tool/tool.ts b/api/src/llms/tools/delegateTasks.tool/tool.ts index a3d3e55c..45da6b3d 100644 --- a/api/src/llms/tools/delegateTasks.tool/tool.ts +++ b/api/src/llms/tools/delegateTasks.tool/tool.ts @@ -21,7 +21,7 @@ import { logger } from 'shared/logger.ts'; import type { LLMAnswerToolUse, LLMMessageContentParts } from 'api/llms/llmMessage.ts'; import type ProjectEditor from 'api/editor/projectEditor.ts'; //import { createError, ErrorType } from 'api/utils/error.ts'; -//import type InteractionManager from 'api/llms/interactionManager.ts'; +//import type { InteractionManager } from 'api/llms/interactionManager.ts'; import type OrchestratorController from 'api/controllers/orchestratorController.ts'; //import { ResourceManager } from 'api/resources/resourceManager.ts'; //import { AgentCapabilityManager } from '../../../llms/agentCapabilityManager.ts'; diff --git a/api/src/llms/tools/delegateTasks.tool/types.ts b/api/src/llms/tools/delegateTasks.tool/types.ts index 71ef1e1e..7d4d6c37 100644 --- a/api/src/llms/tools/delegateTasks.tool/types.ts +++ b/api/src/llms/tools/delegateTasks.tool/types.ts @@ -38,6 +38,6 @@ export interface LLMToolDelegateTasksResponseData { } export interface LLMToolDelegateTasksResult { - toolResult: LLMToolRunResultContent; + toolResults: LLMToolRunResultContent; bbResponse: LLMToolDelegateTasksResponseData & LLMToolRunBbResponse; } diff --git a/api/src/llms/tools/displayResource.tool/tool.ts b/api/src/llms/tools/displayResource.tool/tool.ts index 7c294f98..bc845999 100644 --- a/api/src/llms/tools/displayResource.tool/tool.ts +++ b/api/src/llms/tools/displayResource.tool/tool.ts @@ -52,7 +52,7 @@ export default class LLMToolDisplayResource extends LLMTool { dataSourceId: { type: 'string', description: - "Data source ID to operate on. Defaults to the primary data source if omitted. Examples: 'primary', 'filesystem-1', 'db-staging'. Data sources are identified by their name (e.g., 'primary', 'local-2', 'supabase').", + "Data source ID to operate on. Defaults to the primary data source if omitted. Examples: 'primary', 'filesystem-1', 'db-staging'. Data sources are identified by their name (e.g., 'primary', 'local-2', 'supabase'). 
**IMPORTANT: Different data sources have different path format requirements - use loadDataSource with returnType='instructions' and operations=['utility'] to get provider-specific resource path guidance.**", }, resourcePath: { type: 'string', diff --git a/api/src/llms/tools/displayResource.tool/types.ts b/api/src/llms/tools/displayResource.tool/types.ts index 63732f2b..29ef47c6 100644 --- a/api/src/llms/tools/displayResource.tool/types.ts +++ b/api/src/llms/tools/displayResource.tool/types.ts @@ -40,7 +40,7 @@ export interface LLMToolDisplayResourceResponseData { } export interface LLMToolDisplayResourceResult { - toolResult: LLMToolRunResultContent; + toolResults: LLMToolRunResultContent; bbResponse: LLMToolDisplayResourceResponseData; } diff --git a/api/src/llms/tools/downloadResource.tool/formatter.browser.tsx b/api/src/llms/tools/downloadResource.tool/formatter.browser.tsx new file mode 100644 index 00000000..1071cb48 --- /dev/null +++ b/api/src/llms/tools/downloadResource.tool/formatter.browser.tsx @@ -0,0 +1,243 @@ +/** @jsxImportSource preact */ +import LLMTool from 'api/llms/llmTool.ts'; +import type { LLMToolInputSchema, LLMToolLogEntryFormattedResult } from 'api/llms/llmTool.ts'; +import type { CollaborationLogEntryContentToolResult } from 'shared/types.ts'; +import type { LLMToolDownloadResourceInput, LLMToolDownloadResourceResultData } from './types.ts'; + +export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLogEntryFormattedResult => { + const { + url, + method = 'GET', + headers, + auth, + queryParams, + requestBody, + dataSourceId, + resourcePath, + overwriteExisting, + createMissingDirectories, + includeInMessages, + followRedirects, + maxRedirects, + timeout, + maxFileSize, + } = toolInput as LLMToolDownloadResourceInput; + + // Create options array for display + const options: string[] = []; + if (overwriteExisting) options.push('overwrite existing'); + if (createMissingDirectories) options.push('create directories'); + if (includeInMessages) options.push('include in messages'); + if (followRedirects === false) options.push('no redirects'); + if (maxRedirects && maxRedirects !== 10) options.push(`max redirects: ${maxRedirects}`); + if (timeout && timeout !== 30000) options.push(`timeout: ${timeout / 1000}s`); + if (maxFileSize && maxFileSize !== 50 * 1024 * 1024) { + options.push(`max size: ${Math.round(maxFileSize / 1024 / 1024)}MB`); + } + + // Determine auth description + let authDescription = ''; + if (auth && auth.type !== 'none') { + switch (auth.type) { + case 'basic': + authDescription = 'Basic Auth'; + break; + case 'bearer': + authDescription = 'Bearer Token'; + break; + case 'apikey': + authDescription = `API Key ${auth.useQueryParam ? 
'(query param)' : '(header)'}`; + break; + } + } + + const formattedContent = LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Method:')} {LLMTool.TOOL_TAGS_BROWSER.base.text(method)} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('URL:')} {LLMTool.TOOL_TAGS_BROWSER.content.url(url)} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Save to:')}{' '} + {dataSourceId && <>{LLMTool.TOOL_TAGS_BROWSER.base.text(dataSourceId + ':')}} + {LLMTool.TOOL_TAGS_BROWSER.content.filename(resourcePath)} + , + )} + {authDescription && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Authentication:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(authDescription)} + , + )} + {queryParams && Object.keys(queryParams).length > 0 && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Query Parameters:')} +
+ {Object.entries(queryParams).map(([key, value]) => ( +
+ {LLMTool.TOOL_TAGS_BROWSER.base.text(key)}: {LLMTool.TOOL_TAGS_BROWSER.base.text(value)} +
+ ))} +
+ , + )} + {headers && Object.keys(headers).length > 0 && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Headers:')} +
+ {Object.entries(headers).map(([key, value]) => ( +
+ {LLMTool.TOOL_TAGS_BROWSER.base.text(key)}: {LLMTool.TOOL_TAGS_BROWSER.base.text(value)} +
+ ))} +
+ , + )} + {requestBody && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Request Body:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(requestBody.contentType)} + {LLMTool.TOOL_TAGS_BROWSER.base.pre( + requestBody.content.length > 200 + ? `${requestBody.content.substring(0, 200)}...` + : requestBody.content, + )} + , + `${LLMTool.TOOL_STYLES_BROWSER.base.container} ${LLMTool.TOOL_STYLES_BROWSER.content.code}`, + )} + {options.length > 0 && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Options:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(options.join(', '))} + , + )} + , + LLMTool.TOOL_STYLES_BROWSER.base.container, + ); + + return { + title: LLMTool.TOOL_TAGS_BROWSER.content.title('Tool Use', 'Download Resource'), + subtitle: LLMTool.TOOL_TAGS_BROWSER.content.subtitle(`${method} ${url} → ${resourcePath}`), + content: formattedContent, + preview: `Download ${method} ${url} to ${resourcePath}`, + }; +}; + +export const formatLogEntryToolResult = ( + resultContent: CollaborationLogEntryContentToolResult, +): LLMToolLogEntryFormattedResult => { + const { bbResponse } = resultContent; + + if (typeof bbResponse === 'object' && 'data' in bbResponse) { + const data = bbResponse.data as LLMToolDownloadResourceResultData; + const { response, bytesDownloaded, durationMs, isNewResource, dataSource } = data; + const { contentTypeInfo } = response; + const operation = isNewResource ? 'Downloaded' : 'Updated'; + + const content = LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label(`✅ ${operation} successfully:`)} +
{LLMTool.TOOL_TAGS_BROWSER.content.url(data.url)}
+ {response.finalUrl !== data.url && ( +
+ {LLMTool.TOOL_TAGS_BROWSER.base.text('→ ')} + {LLMTool.TOOL_TAGS_BROWSER.content.url(response.finalUrl)} +
+ )} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Saved to:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(dataSource.dsConnectionName + ':')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.content.filename(data.resourcePath)} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Content-Type:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(contentTypeInfo.mimeType)}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(`(${contentTypeInfo.contentType})`)} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Size:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.content.bytes(bytesDownloaded)} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Duration:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.content.duration(durationMs)} + , + )} + {LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('HTTP Status:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(`${response.status} ${response.statusText}`)} + , + )} + {response.redirectCount > 0 && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Redirects:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.content.number(response.redirectCount)} + , + )} + {response.contentLength && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Content-Length:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.content.bytes(response.contentLength)} + , + )} + {contentTypeInfo.warningMessage && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('⚠️ Warning:')}{' '} + {LLMTool.TOOL_TAGS_BROWSER.base.text(contentTypeInfo.warningMessage)} + , + `${LLMTool.TOOL_STYLES_BROWSER.base.container} ${LLMTool.TOOL_STYLES_BROWSER.status.warning}`, + )} + {data.conversationContent && LLMTool.TOOL_TAGS_BROWSER.base.container( + <> + {LLMTool.TOOL_TAGS_BROWSER.base.label('Content included in messages:')} + {LLMTool.TOOL_TAGS_BROWSER.base.pre( + data.conversationContent.length > 500 + ? 
`${data.conversationContent.substring(0, 500)}...` + : data.conversationContent, + )} + , + `${LLMTool.TOOL_STYLES_BROWSER.base.container} ${LLMTool.TOOL_STYLES_BROWSER.content.code}`, + )} + , + `${LLMTool.TOOL_STYLES_BROWSER.base.container} ${LLMTool.TOOL_STYLES_BROWSER.status.success}`, + ); + + return { + title: LLMTool.TOOL_TAGS_BROWSER.content.title('Tool Result', 'Download Resource'), + subtitle: LLMTool.TOOL_TAGS_BROWSER.content.subtitle(`${operation} ${data.resourcePath}`), + content, + preview: `${operation} ${bytesDownloaded} bytes in ${durationMs}ms`, + }; + } else { + const content = LLMTool.TOOL_TAGS_BROWSER.base.container( + LLMTool.TOOL_TAGS_BROWSER.base.label(String(bbResponse)), + `${LLMTool.TOOL_STYLES_BROWSER.base.container} ${LLMTool.TOOL_STYLES_BROWSER.status.error}`, + ); + + return { + title: LLMTool.TOOL_TAGS_BROWSER.content.title('Tool Result', 'Download Resource'), + subtitle: LLMTool.TOOL_TAGS_BROWSER.content.subtitle('failed'), + content, + preview: 'Download failed', + }; + } +}; diff --git a/api/src/llms/tools/downloadResource.tool/formatter.console.ts b/api/src/llms/tools/downloadResource.tool/formatter.console.ts new file mode 100644 index 00000000..65c25f10 --- /dev/null +++ b/api/src/llms/tools/downloadResource.tool/formatter.console.ts @@ -0,0 +1,192 @@ +import LLMTool from 'api/llms/llmTool.ts'; +import type { LLMToolInputSchema, LLMToolLogEntryFormattedResult } from 'api/llms/llmTool.ts'; +import type { CollaborationLogEntryContentToolResult } from 'shared/types.ts'; +import type { LLMToolDownloadResourceInput, LLMToolDownloadResourceResultData } from './types.ts'; +import { stripIndents } from 'common-tags'; + +export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLogEntryFormattedResult => { + const { + url, + method = 'GET', + headers, + auth, + queryParams, + requestBody, + dataSourceId, + resourcePath, + overwriteExisting, + createMissingDirectories, + includeInMessages, + followRedirects, + maxRedirects, + timeout, + maxFileSize, + } = toolInput as LLMToolDownloadResourceInput; + + // Determine auth description + let authDescription = ''; + if (auth && auth.type !== 'none') { + switch (auth.type) { + case 'basic': + authDescription = 'Basic Authentication'; + break; + case 'bearer': + authDescription = 'Bearer Token'; + break; + case 'apikey': + authDescription = `API Key ${auth.useQueryParam ? '(query parameter)' : '(header)'}`; + break; + } + } + + // Create options array for display + const options: string[] = []; + if (overwriteExisting) options.push('overwrite existing'); + if (createMissingDirectories) options.push('create directories'); + if (includeInMessages) options.push('include in messages'); + if (followRedirects === false) options.push('no redirects'); + if (maxRedirects && maxRedirects !== 10) options.push(`max redirects: ${maxRedirects}`); + if (timeout && timeout !== 30000) options.push(`timeout: ${timeout / 1000}s`); + if (maxFileSize && maxFileSize !== 50 * 1024 * 1024) { + options.push(`max size: ${Math.round(maxFileSize / 1024 / 1024)}MB`); + } + + let formattedContent = stripIndents` + ${LLMTool.TOOL_STYLES_CONSOLE.base.label('Method:')} ${LLMTool.TOOL_STYLES_CONSOLE.content.data(method)} + ${LLMTool.TOOL_STYLES_CONSOLE.base.label('URL:')} ${LLMTool.TOOL_STYLES_CONSOLE.content.url(url)} + ${LLMTool.TOOL_STYLES_CONSOLE.base.label('Save to:')} ${ + dataSourceId ? 
LLMTool.TOOL_STYLES_CONSOLE.content.data(dataSourceId + ':') + ' ' : '' + }${LLMTool.TOOL_STYLES_CONSOLE.content.filename(resourcePath)} + `; + + if (authDescription) { + formattedContent += `\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Authentication:')} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(authDescription) + }`; + } + + if (queryParams && Object.keys(queryParams).length > 0) { + formattedContent += `\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Query Parameters:')}`; + Object.entries(queryParams).forEach(([key, value]) => { + formattedContent += `\n ${LLMTool.TOOL_STYLES_CONSOLE.content.data(key)}: ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(value) + }`; + }); + } + + if (headers && Object.keys(headers).length > 0) { + formattedContent += `\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Headers:')}`; + Object.entries(headers).forEach(([key, value]) => { + formattedContent += `\n ${LLMTool.TOOL_STYLES_CONSOLE.content.data(key)}: ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(value) + }`; + }); + } + + if (requestBody) { + formattedContent += `\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Request Body:')} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(requestBody.contentType) + }`; + const bodyPreview = requestBody.content.length > 200 + ? `${requestBody.content.substring(0, 200)}...` + : requestBody.content; + formattedContent += `\n\n${LLMTool.TOOL_STYLES_CONSOLE.content.code(bodyPreview)}`; + } + + if (options.length > 0) { + formattedContent += `\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Options:')} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(options.join(', ')) + }`; + } + + return { + title: LLMTool.TOOL_STYLES_CONSOLE.content.title('Tool Use', 'Download Resource'), + subtitle: LLMTool.TOOL_STYLES_CONSOLE.content.subtitle(`${method} ${url} → ${resourcePath}`), + content: formattedContent, + preview: `Download ${method} ${url} to ${resourcePath}`, + }; +}; + +export const formatLogEntryToolResult = ( + resultContent: CollaborationLogEntryContentToolResult, +): LLMToolLogEntryFormattedResult => { + const { bbResponse } = resultContent; + + if (typeof bbResponse === 'object' && 'data' in bbResponse) { + const data = bbResponse.data as LLMToolDownloadResourceResultData; + const { response, bytesDownloaded, durationMs, isNewResource, dataSource } = data; + const { contentTypeInfo } = response; + const operation = isNewResource ? 
'Downloaded' : 'Updated'; + + let content = stripIndents` + ${LLMTool.TOOL_STYLES_CONSOLE.base.label(`✅ ${operation} successfully:`)} + ${LLMTool.TOOL_STYLES_CONSOLE.content.url(data.url)} + `; + + // Show redirect chain if applicable + if (response.finalUrl !== data.url) { + content += `\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Final URL:')} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(response.finalUrl) + }`; + } + + content += stripIndents` + + ${LLMTool.TOOL_STYLES_CONSOLE.base.label('Saved to:')} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(dataSource.dsConnectionName + ':') + } ${LLMTool.TOOL_STYLES_CONSOLE.content.filename(data.resourcePath)} + ${LLMTool.TOOL_STYLES_CONSOLE.base.label('Resource ID:')} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data(data.resourceId) + } + `; + + // Add metadata + content += stripIndents` + + ${LLMTool.TOOL_STYLES_CONSOLE.base.label('Details:')} + Content-Type: ${LLMTool.TOOL_STYLES_CONSOLE.content.data(contentTypeInfo.mimeType)} ${ + LLMTool.TOOL_STYLES_CONSOLE.content.data('(' + contentTypeInfo.contentType + ')') + } + Size: ${LLMTool.TOOL_STYLES_CONSOLE.content.bytes(bytesDownloaded)} + Duration: ${LLMTool.TOOL_STYLES_CONSOLE.content.duration(durationMs)} + HTTP Status: ${LLMTool.TOOL_STYLES_CONSOLE.content.data(response.status + ' ' + response.statusText)} + `; + + // Add optional details + if (response.redirectCount > 0) { + content += `\n Redirects: ${LLMTool.TOOL_STYLES_CONSOLE.content.number(response.redirectCount)}`; + } + + if (response.contentLength) { + content += `\n Content-Length: ${LLMTool.TOOL_STYLES_CONSOLE.content.bytes(response.contentLength)}`; + } + + // Add warnings + if (contentTypeInfo.warningMessage) { + content += `\n\n${LLMTool.TOOL_STYLES_CONSOLE.status.warning(contentTypeInfo.warningMessage)}`; + } + + // Add conversation content preview if included + if (data.conversationContent) { + content += `\n\n${LLMTool.TOOL_STYLES_CONSOLE.base.label('Content included in messages:')}`; + const contentPreview = data.conversationContent.length > 500 + ? `${data.conversationContent.substring(0, 500)}...` + : data.conversationContent; + content += `\n\n${LLMTool.TOOL_STYLES_CONSOLE.content.code(contentPreview)}`; + } + + return { + title: LLMTool.TOOL_STYLES_CONSOLE.content.title('Tool Result', 'Download Resource'), + subtitle: LLMTool.TOOL_STYLES_CONSOLE.content.subtitle(`${operation} ${data.resourcePath}`), + content, + preview: `${operation} ${bytesDownloaded} bytes in ${durationMs}ms`, + }; + } else { + return { + title: LLMTool.TOOL_STYLES_CONSOLE.content.title('Tool Result', 'Download Resource'), + subtitle: LLMTool.TOOL_STYLES_CONSOLE.content.subtitle('failed'), + content: LLMTool.TOOL_STYLES_CONSOLE.status.error(String(bbResponse)), + preview: 'Download failed', + }; + } +}; diff --git a/api/src/llms/tools/downloadResource.tool/info.json b/api/src/llms/tools/downloadResource.tool/info.json new file mode 100644 index 00000000..34aac50e --- /dev/null +++ b/api/src/llms/tools/downloadResource.tool/info.json @@ -0,0 +1,10 @@ +{ + "name": "download_resource", + "description": "Download resources using direct HTTP requests (like curl) and save to data sources. Handles query parameter encoding and provides comprehensive error reporting. Use for static resources, API endpoints, raw files, or when you need custom authentication/headers. Does NOT use a browser - no JavaScript execution or dynamic content loading. 
For pages requiring browser context (GitHub pages, SPAs, dynamic content), use fetch_web_page or fetch_web_screenshot instead. Supports all HTTP methods, authentication, and saves directly to datasources with content type validation.", + "version": "1.0.0", + "enabled": true, + "category": "data-retrieval", + "capabilities": ["resource-download"], + "author": "BB Team", + "license": "MIT" +} diff --git a/api/src/llms/tools/downloadResource.tool/tests/tool.test.ts b/api/src/llms/tools/downloadResource.tool/tests/tool.test.ts new file mode 100644 index 00000000..7525636d --- /dev/null +++ b/api/src/llms/tools/downloadResource.tool/tests/tool.test.ts @@ -0,0 +1,837 @@ +import { assert, assertEquals, assertRejects, assertStringIncludes } from 'api/tests/deps.ts'; + +import type { LLMAnswerToolUse } from 'api/llms/llmMessage.ts'; +import { makeOrchestratorControllerStub } from 'api/tests/stubs.ts'; +import { + createTestInteraction, + getProjectEditor, + getTestFilePath, + getToolManager, + withTestProject, +} from 'api/tests/testSetup.ts'; +import { ResourceHandlingError, ToolHandlingError } from 'api/errors/error.ts'; +import type { LLMToolDownloadResourceResultData } from '../types.ts'; + +// Type guard function +function isDownloadResourceResponse( + response: unknown, +): response is { data: LLMToolDownloadResourceResultData } { + const data = response && typeof response === 'object' && 'data' in response + ? (response as { data: unknown }).data + : null; + return ( + data !== null && + typeof data === 'object' && + 'url' in data && + typeof data.url === 'string' && + 'resourcePath' in data && + typeof data.resourcePath === 'string' && + 'bytesDownloaded' in data && + typeof data.bytesDownloaded === 'number' && + 'isNewResource' in data && + typeof data.isNewResource === 'boolean' + ); +} + +// Type guard to check if toolResults is a string +function isString(value: unknown): value is string { + return typeof value === 'string'; +} + +// Mock fetch for testing +let originalFetch: typeof globalThis.fetch; +type MockMatcher = { + baseUrl: string; + requiredParams?: Record; + response: Response; +}; + +let mockMatchers: MockMatcher[] = []; + +function setupMockFetch() { + originalFetch = globalThis.fetch; + globalThis.fetch = async (input: string | Request | URL, init?: RequestInit): Promise => { + const url = typeof input === 'string' ? 
input : input.toString(); + console.log('setupMockFetch - url:', url); + console.log('setupMockFetch - mockMatchers:', mockMatchers); + const parsedUrl = new URL(url); + + // Check matchers first + for (const matcher of mockMatchers) { + if (urlMatches(parsedUrl, matcher)) { + return matcher.response.clone(); // Clone to avoid stream consumption issues + } + } + + // Default successful text response + if (url.includes('success-text')) { + return new Response('Hello from downloaded resource!', { + status: 200, + headers: { 'content-type': 'text/plain', 'content-length': '31' }, + }); + } + + // JSON response + if (url.includes('success-json')) { + const jsonData = { message: 'Hello JSON', data: [1, 2, 3] }; + return new Response(JSON.stringify(jsonData), { + status: 200, + headers: { 'content-type': 'application/json' }, + }); + } + + // Binary response (simulate small image) + if (url.includes('success-binary')) { + const binaryData = new Uint8Array([0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]); // PNG header + return new Response(binaryData, { + status: 200, + headers: { 'content-type': 'image/png', 'content-length': '8' }, + }); + } + + // Error responses + if (url.includes('error-404')) { + return new Response('Not Found', { status: 404 }); + } + + if (url.includes('error-timeout')) { + // Simulate timeout + throw new Error('Request timeout'); + } + + // Default fallback + return new Response('Default response', { + status: 200, + headers: { 'content-type': 'text/plain' }, + }); + }; +} +function urlMatches(url: URL, matcher: MockMatcher): boolean { + const matcherUrl = new URL(matcher.baseUrl); + + if (url.origin !== matcherUrl.origin || url.pathname !== matcherUrl.pathname) { + return false; + } + + if (matcher.requiredParams) { + return Object.entries(matcher.requiredParams) + .every(([key, value]) => url.searchParams.get(key) === value); + } + + return true; +} + +function addMockResponse(baseUrl: string, response: Response, requiredParams?: Record) { + mockMatchers.push({ baseUrl, requiredParams, response }); +} +function clearMockMatchers() { + mockMatchers.length = 0; +} + +function restoreFetch() { + globalThis.fetch = originalFetch; + clearMockMatchers(); +} + +Deno.test({ + name: 'Download Resource Tool - download simple text file', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction('test-collaboration', 'test-interaction', projectEditor); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 'downloaded-text.txt'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://example.com/success-text', + resourcePath: testResource, + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + result.bbResponse && typeof result.bbResponse === 'object', + 'bbResponse should be an object', + ); + assert( + 
isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure for DownloadResource', + ); + + if (isDownloadResourceResponse(result.bbResponse)) { + assertEquals( + result.bbResponse.data.resourcePath, + testResource, + `Test response resourcePath should be "${testResource}"`, + ); + assertEquals( + result.bbResponse.data.url, + 'https://example.com/success-text', + 'Test response url should match input', + ); + assertEquals( + result.bbResponse.data.isNewResource, + true, + 'Test response isNewResource should be true', + ); + assertEquals( + result.bbResponse.data.bytesDownloaded, + 31, + 'Test response bytesDownloaded should be 31', + ); + } + + assertStringIncludes(result.toolResponse, 'Downloaded'); + if (isString(result.toolResults)) { + assertStringIncludes(result.toolResults, 'Downloaded GET https://example.com/success-text'); + } + + // Verify file was actually created + assert(await Deno.stat(testResourcePath)); + const content = await Deno.readTextFile(testResourcePath); + assertEquals(content, 'Hello from downloaded resource!'); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - download JSON with custom headers', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 'data.json'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://api.example.com/success-json', + resourcePath: testResource, + headers: { + 'Accept': 'application/json', + 'User-Agent': 'BB-Tool/1.0', + }, + includeInMessages: true, + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure', + ); + + if (isDownloadResourceResponse(result.bbResponse)) { + assertEquals( + result.bbResponse.data.response.contentTypeInfo.contentType, + 'text', + 'JSON should be detected as text content', + ); + // Should include content in conversation + assert( + result.bbResponse.data.conversationContent, + 'Should include conversation content', + ); + } + + // Verify file was created with JSON content + assert(await Deno.stat(testResourcePath)); + const content = await Deno.readTextFile(testResourcePath); + const parsedJson = JSON.parse(content); + assertEquals(parsedJson.message, 'Hello JSON'); + assertEquals(parsedJson.data, [1, 2, 3]); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - download binary content', + async fn() { + await 
withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 'image.png'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://images.example.com/success-binary', + resourcePath: testResource, + includeInMessages: true, // Should not include binary in messages + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure', + ); + + if (isDownloadResourceResponse(result.bbResponse)) { + assertEquals( + result.bbResponse.data.response.contentTypeInfo.contentType, + 'image', + 'PNG should be detected as image content', + ); + assertEquals( + result.bbResponse.data.bytesDownloaded, + 8, + 'Should download 8 bytes', + ); + // Should NOT include binary content in conversation + assertEquals( + result.bbResponse.data.conversationContent, + undefined, + 'Should not include binary content in conversation', + ); + } + + // Verify binary file was created + assert(await Deno.stat(testResourcePath)); + const content = await Deno.readFile(testResourcePath); + assertEquals(content.length, 8); + assertEquals(content[0], 0x89); // PNG signature + assertEquals(content[1], 0x50); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - POST request with authentication', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + // Mock a POST endpoint + addMockResponse( + 'https://api.example.com/data', + new Response('POST response data', { status: 200, headers: { 'content-type': 'text/plain' } }), + { format: 'json', version: 'v1' }, + ); + try { + const testResource = 'post-data.txt'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://api.example.com/data', + method: 'POST', + resourcePath: testResource, + 
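// Bearer auth is sent as an 'Authorization: Bearer <token>' header (see buildHeaders in tool.ts),
// while the queryParams below are appended to the request URL by buildUrlWithParams.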
auth: { + type: 'bearer', + token: 'secret-bearer-token', + }, + requestBody: { + content: JSON.stringify({ query: 'test data' }), + contentType: 'application/json', + }, + queryParams: { + format: 'json', + version: 'v1', + }, + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure', + ); + + if (isDownloadResourceResponse(result.bbResponse)) { + assertEquals( + result.bbResponse.data.method, + 'POST', + 'Should record POST method', + ); + } + + // Verify file was created + assert(await Deno.stat(testResourcePath)); + const content = await Deno.readTextFile(testResourcePath); + assertEquals(content, 'POST response data'); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - overwrite existing file', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 'overwrite-test.txt'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + // Create existing file + await Deno.writeTextFile(testResourcePath, 'Original content'); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://example.com/success-text', + resourcePath: testResource, + overwriteExisting: true, + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure', + ); + + if (isDownloadResourceResponse(result.bbResponse)) { + assertEquals( + result.bbResponse.data.isNewResource, + false, + 'Should not be a new resource', + ); + } + + assertStringIncludes(result.toolResponse, 'Downloaded and overwrote'); + + // Verify file was overwritten + const content = await Deno.readTextFile(testResourcePath); + assertEquals(content, 'Hello from downloaded resource!'); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - fail when file exists and overwrite is false', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const 
logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 'existing-file.txt'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + // Create existing file + await Deno.writeTextFile(testResourcePath, 'Original content'); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://example.com/success-text', + resourcePath: testResource, + overwriteExisting: false, // Default behavior + }, + }; + + await assertRejects( + async () => await tool.runTool(interaction, toolUse, projectEditor), + ResourceHandlingError, + 'already exists and overwriteExisting is false', + ); + + // Verify original file is unchanged + const content = await Deno.readTextFile(testResourcePath); + assertEquals(content, 'Original content'); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - HTTP error response', + async fn() { + await withTestProject(async (testProjectId, _testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 'error-test.txt'; + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://example.com/error-404', + resourcePath: testResource, + }, + }; + + await assertRejects( + async () => await tool.runTool(interaction, toolUse, projectEditor), + ResourceHandlingError, + 'HTTP 404', + ); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - content type mismatch warning', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + // Mock response with mismatched content type + addMockResponse( + 'https://example.com/mismatch', + new Response('This is actually text', { + status: 200, + headers: { 'content-type': 'image/png' }, // Wrong type! 
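// The image/png MIME type deliberately conflicts with the .txt target path; detectContentType()
// should surface this via contentTypeInfo.warningMessage, which the assertions below verify.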
+ }), + ); + + try { + const testResource = 'data.txt'; // Text extension but will get image type + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://example.com/mismatch', + resourcePath: testResource, + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure', + ); + + if (isDownloadResourceResponse(result.bbResponse)) { + // Should have a warning about content type mismatch + assert( + result.bbResponse.data.response.contentTypeInfo.warningMessage, + 'Should have content type warning', + ); + assertStringIncludes( + result.bbResponse.data.response.contentTypeInfo.warningMessage!, + 'Content type mismatch', + ); + } + + // Should still download the file + assert(await Deno.stat(testResourcePath)); + const content = await Deno.readTextFile(testResourcePath); + assertEquals(content, 'This is actually text'); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - throw error for resource outside project', + async fn() { + await withTestProject(async (testProjectId, _testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResourcePath = '/tmp/outside_project.txt'; + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://example.com/success-text', + resourcePath: testResourcePath, + }, + }; + + await assertRejects( + async () => await tool.runTool(interaction, toolUse, projectEditor), + ResourceHandlingError, + `Access denied: ${testResourcePath} is outside the data source directory`, + ); + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); + +Deno.test({ + name: 'Download Resource Tool - API key authentication as query parameter', + async fn() { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const interaction = await createTestInteraction( + 'test-collaboration', + 'test-interaction', + projectEditor, + ); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('download_resource'); + assert(tool, 'Failed to get tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + setupMockFetch(); + try { + const testResource = 
'api-key-test.txt'; + const testResourcePath = getTestFilePath(testProjectRoot, testResource); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-id', + toolName: 'download_resource', + toolInput: { + url: 'https://api.example.com/success-text', + resourcePath: testResource, + auth: { + type: 'apikey', + token: 'my-api-key-123', + useQueryParam: true, + queryParamName: 'key', + }, + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + assert( + isDownloadResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure', + ); + + // Verify file was created + assert(await Deno.stat(testResourcePath)); + const content = await Deno.readTextFile(testResourcePath); + assertEquals(content, 'Hello from downloaded resource!'); + + // The final URL should include the API key as query parameter + if (isDownloadResourceResponse(result.bbResponse)) { + assertStringIncludes( + result.bbResponse.data.response.finalUrl, + 'key=my-api-key-123', + ); + } + } finally { + logChangeAndCommitStub.restore(); + restoreFetch(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); diff --git a/api/src/llms/tools/downloadResource.tool/tool.ts b/api/src/llms/tools/downloadResource.tool/tool.ts new file mode 100644 index 00000000..a3d8f7ea --- /dev/null +++ b/api/src/llms/tools/downloadResource.tool/tool.ts @@ -0,0 +1,714 @@ +//import type { JSX } from 'preact'; +import LLMTool from 'api/llms/llmTool.ts'; +import type { LLMToolInputSchema, LLMToolLogEntryFormattedResult, LLMToolRunResult } from 'api/llms/llmTool.ts'; +import { + formatLogEntryToolResult as formatLogEntryToolResultBrowser, + formatLogEntryToolUse as formatLogEntryToolUseBrowser, +} from './formatter.browser.tsx'; +import { + formatLogEntryToolResult as formatLogEntryToolResultConsole, + formatLogEntryToolUse as formatLogEntryToolUseConsole, +} from './formatter.console.ts'; +import type { + AuthConfig, + ContentTypeInfo, + LLMToolDownloadResourceInput, + LLMToolDownloadResourceResultData, + RequestBodyConfig, + ResponseMetadata, +} from './types.ts'; +import type LLMConversationInteraction from 'api/llms/conversationInteraction.ts'; +import type { CollaborationLogEntryContentToolResult } from 'shared/types.ts'; +import type { LLMAnswerToolUse } from 'api/llms/llmMessage.ts'; +import type ProjectEditor from 'api/editor/projectEditor.ts'; +import { createError, ErrorType } from 'api/utils/error.ts'; +import type { + DataSourceHandlingErrorOptions, + ResourceHandlingErrorOptions, + ToolHandlingErrorOptions, +} from 'api/errors/error.ts'; +import { isResourceNotFoundError } from 'api/errors/error.ts'; +import { logger } from 'shared/logger.ts'; +import { checkDatasourceAccess } from 'api/utils/featureAccess.ts'; +import { enhanceDatasourceError } from '../../../utils/datasourceErrorEnhancement.ts'; +import { type DOMConfig, extractTextFromHtml, validateHtml } from '../../../utils/dom.utils.ts'; +import { getContentType, isTextMimeType } from '../../../utils/contentTypes.utils.ts'; +import type { ResourceMetadata } from 'shared/types/dataSourceResource.ts'; + +// Constants for download limits and timeouts +const DEFAULT_TIMEOUT = 60 * 1000; // 60 seconds +const DEFAULT_MAX_REDIRECTS = 10; +const DEFAULT_MAX_FILE_SIZE = 50 * 1024 * 1024; // 50MB +const MAX_CONTENT_FOR_MESSAGES = 1024 * 1024; // 1MB for conversation inclusion + +export default class LLMToolDownloadResource extends LLMTool { + get inputSchema(): LLMToolInputSchema 
{ + return { + type: 'object', + properties: { + url: { + type: 'string', + description: + `The complete URL of the resource to download. Supports HTTP and HTTPS protocols. Important considerations: + +1. URL Requirements: + * Must include protocol (http:// or https://) + * URLs with special characters must be properly encoded (e.g., spaces as %20) + * Tool automatically handles encoding of query parameter values only + * Must be accessible from the server + Examples: + * "https://api.github.com/repos/owner/repo/archive/main.zip" + * "https://example.com/data/file.json" + * "https://example.com/path%20with%20spaces/file.pdf" (spaces must be pre-encoded) + * "http://localhost:3000/api/export.csv" + +2. Supported Content Types: + * Text files: JSON, CSV, XML, plain text, code files + * Binary files: Images, PDFs, archives, executables + * Will auto-detect content type from response headers + +3. Common Error Scenarios: + * HTTP 403/401: Authentication required or access denied + * HTTP 404: Resource not found at the specified URL + * HTTP 429: Rate limit exceeded, retry after delay + * HTTP 500-series: Server errors, may be temporary + * Network timeouts: Server may be slow or unreachable + * SSL/TLS errors: Certificate issues with HTTPS URLs + * File size limits: Downloads exceeding maxFileSize will be rejected`, + }, + method: { + type: 'string', + enum: ['GET', 'POST', 'PUT', 'PATCH', 'DELETE'], + default: 'GET', + description: 'HTTP method to use for the request. GET is most common for downloads.', + }, + headers: { + type: 'object', + additionalProperties: { type: 'string' }, + description: `Custom headers to include in the request. Common examples: + * "Accept": "application/json" - Request specific content type + * "User-Agent": "MyApp/1.0" - Identify your application + * "Referer": "https://example.com" - Specify referring page + * Custom API headers as needed`, + }, + auth: { + type: 'object', + description: 'Authentication configuration for the request.', + properties: { + type: { + type: 'string', + enum: ['basic', 'bearer', 'apikey', 'none'], + description: 'Type of authentication to use.', + }, + username: { + type: 'string', + description: 'Username for basic authentication.', + }, + password: { + type: 'string', + description: 'Password for basic authentication.', + }, + token: { + type: 'string', + description: 'Bearer token or API key value.', + }, + headerName: { + type: 'string', + default: 'X-API-Key', + description: 'Header name for API key authentication.', + }, + useQueryParam: { + type: 'boolean', + default: false, + description: 'Send API key as query parameter instead of header.', + }, + queryParamName: { + type: 'string', + default: 'api_key', + description: 'Query parameter name for API key.', + }, + }, + required: ['type'], + }, + queryParams: { + type: 'object', + additionalProperties: { type: 'string' }, + description: 'Query parameters to append to the URL.', + }, + requestBody: { + type: 'object', + description: 'Request body configuration for POST/PUT requests.', + properties: { + content: { + type: 'string', + description: 'Content to send in request body.', + }, + contentType: { + type: 'string', + description: + 'Content-Type header value (e.g., "application/json", "application/x-www-form-urlencoded").', + }, + }, + required: ['content', 'contentType'], + }, + dataSourceId: { + type: 'string', + description: + "Data source ID to operate on. Defaults to the primary data source if omitted. Examples: 'primary', 'filesystem-1', 'db-staging'. 
Data sources are identified by their name (e.g., 'primary', 'local-2', 'supabase').", + }, + resourcePath: { + type: 'string', + description: + 'The path where to save the downloaded content, relative to the data source root. Must be within the data source directory. Example: "downloads/data.json", "assets/image.png".', + }, + overwriteExisting: { + type: 'boolean', + default: false, + description: 'Whether to overwrite the resource if it already exists. Default is false.', + }, + createMissingDirectories: { + type: 'boolean', + default: true, + description: 'Whether to create missing parent directories. Default is true.', + }, + includeInMessages: { + type: 'boolean', + default: false, + description: + 'Whether to include the downloaded content in conversation messages. Only applies to text content under 1MB. Binary content is never included.', + }, + followRedirects: { + type: 'boolean', + default: true, + description: 'Whether to follow HTTP redirects. Default is true.', + }, + maxRedirects: { + type: 'number', + default: DEFAULT_MAX_REDIRECTS, + minimum: 0, + maximum: 20, + description: 'Maximum number of redirects to follow.', + }, + timeout: { + type: 'number', + default: DEFAULT_TIMEOUT, + minimum: 1000, + maximum: 300000, + description: 'Request timeout in milliseconds. Default is 60 seconds.', + }, + maxFileSize: { + type: 'number', + default: DEFAULT_MAX_FILE_SIZE, + minimum: 1024, + maximum: 100 * 1024 * 1024, + description: 'Maximum file size to download in bytes. Default is 50MB.', + }, + }, + required: ['url', 'resourcePath'], + }; + } + + formatLogEntryToolUse( + toolInput: LLMToolInputSchema, + format: 'console' | 'browser', + ): LLMToolLogEntryFormattedResult { + return format === 'console' ? formatLogEntryToolUseConsole(toolInput) : formatLogEntryToolUseBrowser(toolInput); + } + + formatLogEntryToolResult( + resultContent: CollaborationLogEntryContentToolResult, + format: 'console' | 'browser', + ): LLMToolLogEntryFormattedResult { + return format === 'console' + ? formatLogEntryToolResultConsole(resultContent) + : formatLogEntryToolResultBrowser(resultContent); + } + + /** + * Extract resource ID from URI based on provider type + */ + private extractResourceIdFromUri(uri: string, providerType: string): string { + try { + switch (providerType) { + case 'filesystem': + default: { + const match = uri.match(/filesystem[^:]*:\.\/(.+)$/); + return match ? 
match[1] : uri;
+				}
+			}
+		} catch (error) {
+			return uri;
+		}
+	}
+
+	/**
+	 * Detect content type and validate against file extension
+	 */
+	private detectContentType(mimeType: string, resourcePath: string): ContentTypeInfo {
+		// Normalize MIME type
+		const normalizedMimeType = mimeType.toLowerCase().split(';')[0].trim();
+
+		// Determine content type category using existing utility
+		let contentType: 'text' | 'image' | 'binary';
+		if (isTextMimeType(normalizedMimeType)) {
+			contentType = 'text';
+		} else if (normalizedMimeType.startsWith('image/')) {
+			contentType = 'image';
+		} else {
+			contentType = 'binary';
+		}
+
+		// Get expected MIME type from file extension
+		const expectedMimeType = getContentType(resourcePath);
+		const pathExtension = resourcePath.split('.').pop()?.toLowerCase();
+
+		// Check if the response MIME type matches the expected type from extension
+		const mimeTypeMatch = normalizedMimeType === expectedMimeType ||
+			// Allow some common variations
+			(normalizedMimeType === 'image/jpeg' && expectedMimeType === 'image/jpg') ||
+			(normalizedMimeType === 'application/javascript' && expectedMimeType === 'text/javascript');
+
+		let warningMessage: string | undefined;
+		if (!mimeTypeMatch && pathExtension && expectedMimeType !== 'application/octet-stream') {
+			warningMessage =
+				`Content type mismatch: response MIME type '${normalizedMimeType}' doesn't match file extension '${pathExtension}' (expected '${expectedMimeType}')`;
+		}
+
+		return {
+			mimeType: normalizedMimeType,
+			contentType,
+			fileExtension: pathExtension,
+			extensionMatch: mimeTypeMatch,
+			warningMessage,
+		};
+	}
+
+	/**
+	 * Build the complete URL with query parameters
+	 */
+	private buildUrlWithParams(baseUrl: string, queryParams?: Record<string, string>, auth?: AuthConfig): string {
+		const url = new URL(baseUrl);
+
+		// Add regular query parameters
+		if (queryParams) {
+			Object.entries(queryParams).forEach(([key, value]) => {
+				url.searchParams.append(key, value);
+			});
+		}
+
+		// Add API key as query parameter if configured
+		if (auth?.type === 'apikey' && auth.useQueryParam && auth.token) {
+			const paramName = auth.queryParamName || 'api_key';
+			url.searchParams.append(paramName, auth.token);
+		}
+
+		return url.toString();
+	}
+
+	/**
+	 * Build headers for the request
+	 */
+	private buildHeaders(
+		headers?: Record<string, string>,
+		auth?: AuthConfig,
+		requestBody?: RequestBodyConfig,
+	): HeadersInit {
+		const requestHeaders: Record<string, string> = { ...headers };
+
+		// Add authentication headers
+		if (auth) {
+			switch (auth.type) {
+				case 'basic':
+					if (auth.username && auth.password) {
+						const credentials = btoa(`${auth.username}:${auth.password}`);
+						requestHeaders['Authorization'] = `Basic ${credentials}`;
+					}
+					break;
+				case 'bearer':
+					if (auth.token) {
+						requestHeaders['Authorization'] = `Bearer ${auth.token}`;
+					}
+					break;
+				case 'apikey':
+					if (auth.token && !auth.useQueryParam) {
+						const headerName = auth.headerName || 'X-API-Key';
+						requestHeaders[headerName] = auth.token;
+					}
+					break;
+			}
+		}
+
+		// Add Content-Type for request body
+		if (requestBody) {
+			requestHeaders['Content-Type'] = requestBody.contentType;
+		}
+
+		return requestHeaders;
+	}
+
+	async runTool(
+		interaction: LLMConversationInteraction,
+		toolUse: LLMAnswerToolUse,
+		projectEditor: ProjectEditor,
+	): Promise<LLMToolRunResult> {
+		const { toolInput } = toolUse;
+		const {
+			url,
+			method = 'GET',
+			headers,
+			auth,
+			queryParams,
+			requestBody,
+			dataSourceId = undefined,
+			resourcePath,
+			overwriteExisting = false,
+			createMissingDirectories = true,
+			includeInMessages = false,
followRedirects = true, + maxRedirects = DEFAULT_MAX_REDIRECTS, + timeout = DEFAULT_TIMEOUT, + maxFileSize = DEFAULT_MAX_FILE_SIZE, + } = toolInput as LLMToolDownloadResourceInput; + + // Get datasource connections + const { primaryDsConnection, dsConnections, notFound } = this.getDsConnectionsById( + projectEditor, + dataSourceId ? [dataSourceId] : undefined, + ); + + const dsConnectionToUse = dsConnections[0] || primaryDsConnection; + const dsConnectionToUseId = dsConnectionToUse.id; + if (!dsConnectionToUseId) { + throw createError(ErrorType.DataSourceHandling, `No data source id`, { + name: 'datasource', + dataSourceIds: dataSourceId ? [dataSourceId] : undefined, + } as DataSourceHandlingErrorOptions); + } + + if (!dsConnectionToUse) { + throw createError(ErrorType.DataSourceHandling, `No primary data source`, { + name: 'datasource', + dataSourceIds: dataSourceId ? [dataSourceId] : undefined, + } as DataSourceHandlingErrorOptions); + } + + // Check datasource write access + const hasWriteAccess = await checkDatasourceAccess( + projectEditor.userContext, + dsConnectionToUse.providerType, + 'write', + ); + if (!hasWriteAccess) { + throw createError( + ErrorType.ToolHandling, + `Write access for ${dsConnectionToUse.providerType} not available on your current plan`, + { + toolName: 'download_resource', + operation: 'capability-check', + } as ToolHandlingErrorOptions, + ); + } + + // Get resource accessor + const resourceAccessor = await dsConnectionToUse.getResourceAccessor(); + if (!resourceAccessor.writeResource) { + throw createError(ErrorType.ToolHandling, `No writeResource method on resourceAccessor`, { + toolName: 'download_resource', + operation: 'tool-run', + } as ToolHandlingErrorOptions); + } + + // Validate resource path is within datasource + const resourceUri = (resourcePath.includes('://') || resourcePath.startsWith('file:')) + ? dsConnectionToUse.getUriForResource(resourcePath) + : dsConnectionToUse.getUriForResource(`file:./${resourcePath}`); + + if (!await dsConnectionToUse.isResourceWithinDataSource(resourceUri)) { + throw createError( + ErrorType.ResourceHandling, + `Access denied: ${resourcePath} is outside the data source directory`, + { + name: 'download-resource', + filePath: resourcePath, + operation: 'write', + } as ResourceHandlingErrorOptions, + ); + } + + const startTime = Date.now(); + let redirectCount = 0; + let finalUrl = url; + let isNewResource = true; + + try { + // Check if resource exists + try { + await resourceAccessor.loadResource(resourceUri); + isNewResource = false; + + if (!overwriteExisting) { + throw createError( + ErrorType.ResourceHandling, + `Resource ${resourcePath} already exists and overwriteExisting is false`, + { + name: 'download-resource', + filePath: resourcePath, + operation: 'write', + } as ResourceHandlingErrorOptions, + ); + } + } catch (error) { + if (isResourceNotFoundError(error)) { + isNewResource = true; + logger.info(`LLMToolDownloadResource: Resource ${resourceUri} not found. 
Creating new resource.`); + + // Create missing directories if needed + if (createMissingDirectories) { + await resourceAccessor.ensureResourcePathExists(resourceUri); + logger.info(`LLMToolDownloadResource: Created directory structure for ${resourceUri}`); + } + } else { + throw error; + } + } + + // Build the complete URL with parameters + finalUrl = this.buildUrlWithParams(url, queryParams, auth); + + // Build headers + const requestHeaders = this.buildHeaders(headers, auth, requestBody); + + // Create abort controller for timeout + const controller = new AbortController(); + const timeoutId = setTimeout(() => controller.abort(), timeout); + + try { + // Make the HTTP request + const fetchOptions: RequestInit = { + method, + headers: requestHeaders, + redirect: followRedirects ? 'follow' : 'manual', + signal: controller.signal, + }; + + if (requestBody && (method === 'POST' || method === 'PUT' || method === 'PATCH')) { + fetchOptions.body = requestBody.content; + } + + logger.info(`LLMToolDownloadResource: Starting download from ${finalUrl} using ${method}`); + const response = await fetch(finalUrl, fetchOptions); + + clearTimeout(timeoutId); + + // Check response status + if (!response.ok) { + throw createError( + ErrorType.ResourceHandling, + `HTTP ${response.status} ${response.statusText} from ${finalUrl}`, + { + name: 'download-resource', + filePath: resourcePath, + operation: 'write', + } as ResourceHandlingErrorOptions, + ); + } + + // Get response metadata + const responseHeaders: Record = {}; + response.headers.forEach((value, key) => { + responseHeaders[key.toLowerCase()] = value; + }); + + const contentLength = response.headers.get('content-length') + ? parseInt(response.headers.get('content-length')!) + : undefined; + const mimeType = response.headers.get('content-type') || 'application/octet-stream'; + + // Check file size limit + if (contentLength && contentLength > maxFileSize) { + throw createError( + ErrorType.ResourceHandling, + `File size ${contentLength} bytes exceeds maximum allowed size ${maxFileSize} bytes`, + { + name: 'download-resource', + filePath: resourcePath, + operation: 'write', + } as ResourceHandlingErrorOptions, + ); + } + + // Detect content type and validate extension + const contentTypeInfo = this.detectContentType(mimeType, resourcePath); + + // Download content as appropriate type + let contentToWrite: string | Uint8Array; + let conversationContent: string | undefined; + + if (contentTypeInfo.contentType === 'text') { + const textContent = await response.text(); + contentToWrite = textContent; + + // Include in messages if requested and not too large + if (includeInMessages && textContent.length <= MAX_CONTENT_FOR_MESSAGES) { + // Clean HTML content if it's HTML + if (mimeType.includes('html')) { + try { + const validation = await validateHtml(textContent); + if (validation.isValid) { + conversationContent = await extractTextFromHtml(textContent); + } else { + conversationContent = textContent; + } + } catch { + conversationContent = textContent; + } + } else { + conversationContent = textContent; + } + } + } else { + // Handle binary content + const arrayBuffer = await response.arrayBuffer(); + contentToWrite = new Uint8Array(arrayBuffer); + + // Check size limit after download for binary content + if (contentToWrite.length > maxFileSize) { + throw createError( + ErrorType.ResourceHandling, + `Downloaded file size ${contentToWrite.length} bytes exceeds maximum allowed size ${maxFileSize} bytes`, + { + name: 'download-resource', + filePath: 
resourcePath, + operation: 'write', + } as ResourceHandlingErrorOptions, + ); + } + } + + const endTime = Date.now(); + const durationMs = endTime - startTime; + + // Write using resource accessor + logger.info(`LLMToolDownloadResource: Writing content to ${resourceUri}`); + const writeResult = await resourceAccessor.writeResource(resourceUri, contentToWrite, { + overwrite: overwriteExisting, + createMissingDirectories, + contentFormat: contentTypeInfo.contentType === 'text' ? 'plain-text' : 'binary', + }); + + if (!writeResult.success) { + throw createError( + ErrorType.ResourceHandling, + `Writing downloaded resource failed for ${resourcePath}`, + { + name: 'download-resource', + filePath: resourcePath, + operation: 'write', + } as ResourceHandlingErrorOptions, + ); + } + + logger.info( + `LLMToolDownloadResource: Successfully downloaded and saved ${writeResult.bytesWritten} bytes to ${writeResult.uri}`, + ); + + // Extract resource ID from URI + const resourceId = this.extractResourceIdFromUri(writeResult.uri, dsConnectionToUse.providerType); + + // Create response metadata + const responseMetadata: ResponseMetadata = { + status: response.status, + statusText: response.statusText, + headers: responseHeaders, + finalUrl: response.url || finalUrl, + redirectCount, + contentLength, + contentTypeInfo, + }; + + // Log change and commit for text content + if (contentTypeInfo.contentType === 'text') { + logger.info(`LLMToolDownloadResource: Saving conversation download resource: ${interaction.id}`); + await projectEditor.orchestratorController.logChangeAndCommit( + interaction, + dsConnectionToUse.getDataSourceRoot(), + resourcePath, + contentToWrite as string, + ); + } + + // Build tool results + const bytesDownloaded = writeResult.bytesWritten || 0; + const dsConnectionStatus = notFound.length > 0 + ? `Could not find data source for: [${notFound.join(', ')}]` + : `Data source: ${dsConnectionToUse.name} [${dsConnectionToUse.id}]`; + + const resultData: LLMToolDownloadResourceResultData = { + url, + method, + resourcePath, + resourceId, + response: responseMetadata, + bytesDownloaded, + durationMs, + isNewResource, + dataSource: { + dsConnectionId: dsConnectionToUse.id!, + dsConnectionName: dsConnectionToUse.name, + dsProviderType: dsConnectionToUse.providerType, + }, + conversationContent, + }; + + const warnings = contentTypeInfo.warningMessage ? [contentTypeInfo.warningMessage] : []; + + const toolResults = conversationContent || + `Downloaded ${method} ${finalUrl} → ${resourcePath}\n` + + `Content-Type: ${mimeType}\n` + + `Size: ${bytesDownloaded} bytes\n` + + `Duration: ${durationMs}ms\n` + + `Resource ID: ${resourceId}` + + (warnings.length > 0 ? `\n\nWarnings:\n${warnings.join('\n')}` : ''); + + const toolResponse = `${dsConnectionStatus}\n` + + `${isNewResource ? 'Downloaded' : 'Downloaded and overwrote'} ${finalUrl} to ${resourcePath}\n` + + `Content-Type: ${mimeType}, Size: ${bytesDownloaded} bytes, Duration: ${durationMs}ms\n` + + `Resource ID: ${resourceId}` + + (warnings.length > 0 ? 
`\nWarnings: ${warnings.join(', ')}` : '');
+
+				return {
+					toolResults,
+					toolResponse,
+					bbResponse: {
+						data: resultData,
+					},
+				};
+			} finally {
+				clearTimeout(timeoutId);
+			}
+		} catch (error) {
+			if ((error as Error).name === 'download-resource') {
+				throw error;
+			}
+
+			const originalErrorMessage = `Failed to download resource from ${finalUrl}: ${(error as Error).message}`;
+
+			// Enhance error message with datasource-specific guidance
+			const enhancedErrorMessage = enhanceDatasourceError(
+				originalErrorMessage,
+				dsConnectionToUse.provider,
+				'write',
+				resourcePath,
+				interaction,
+			);
+
+			logger.error(`LLMToolDownloadResource: ${enhancedErrorMessage}`);
+
+			throw createError(ErrorType.ResourceHandling, enhancedErrorMessage, {
+				name: 'download-resource',
+				filePath: resourcePath,
+				operation: 'write',
+			} as ResourceHandlingErrorOptions);
+		}
+	}
+}
diff --git a/api/src/llms/tools/downloadResource.tool/types.ts b/api/src/llms/tools/downloadResource.tool/types.ts
new file mode 100644
index 00000000..63573c0a
--- /dev/null
+++ b/api/src/llms/tools/downloadResource.tool/types.ts
@@ -0,0 +1,147 @@
+/**
+ * Type definitions for the downloadResource tool
+ */
+
+/**
+ * Authentication configuration for downloadResource requests
+ */
+export interface AuthConfig {
+	/** Authentication method */
+	type: 'basic' | 'bearer' | 'apikey' | 'none';
+	/** Username for basic auth */
+	username?: string;
+	/** Password for basic auth */
+	password?: string;
+	/** Bearer token or API key value */
+	token?: string;
+	/** Header name for API key authentication (default: 'X-API-Key') */
+	headerName?: string;
+	/** Whether to send API key as query parameter instead of header */
+	useQueryParam?: boolean;
+	/** Query parameter name for API key (default: 'api_key') */
+	queryParamName?: string;
+}
+
+/**
+ * Request body configuration for POST/PUT requests
+ */
+export interface RequestBodyConfig {
+	/** Content to send in request body */
+	content: string;
+	/** Content-Type header value (e.g., 'application/json', 'application/x-www-form-urlencoded') */
+	contentType: string;
+}
+
+/**
+ * Input schema for downloadResource tool
+ */
+export interface LLMToolDownloadResourceInput {
+	/** URL to download from */
+	url: string;
+	/** HTTP method to use */
+	method?: 'GET' | 'POST' | 'PUT' | 'PATCH' | 'DELETE';
+	/** Custom headers to send */
+	headers?: Record<string, string>;
+	/** Authentication configuration */
+	auth?: AuthConfig;
+	/** Query parameters to append to URL */
+	queryParams?: Record<string, string>;
+	/** Request body for POST/PUT requests */
+	requestBody?: RequestBodyConfig;
+	/** Data source ID to write to */
+	dataSourceId?: string;
+	/** Path where to save downloaded content */
+	resourcePath: string;
+	/** Whether to overwrite existing file */
+	overwriteExisting?: boolean;
+	/** Whether to create missing directories */
+	createMissingDirectories?: boolean;
+	/** Whether to include content in conversation messages */
+	includeInMessages?: boolean;
+	/** Whether to follow HTTP redirects */
+	followRedirects?: boolean;
+	/** Maximum number of redirects to follow */
+	maxRedirects?: number;
+	/** Request timeout in milliseconds */
+	timeout?: number;
+	/** Maximum file size to download in bytes */
+	maxFileSize?: number;
+}
+
+/**
+ * Download progress information
+ */
+export interface DownloadProgress {
+	/** Number of bytes downloaded so far */
+	bytesDownloaded: number;
+	/** Total content length if available */
+	totalBytes?: number;
+	/** Download progress as percentage (0-100) */
+	percentage?: number;
+}
+
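// Illustrative sketch: a minimal LLMToolDownloadResourceInput value, based on the interface
// above and the tool's inputSchema, written as a consumer of this module would import it.
// The URL, token, and resourcePath values are placeholders, not real endpoints or credentials.
import type { LLMToolDownloadResourceInput } from './types.ts';

const exampleDownloadInput: LLMToolDownloadResourceInput = {
	url: 'https://api.example.com/reports/latest.json', // hypothetical endpoint
	method: 'GET',
	headers: { 'Accept': 'application/json' },
	auth: { type: 'bearer', token: 'example-token' }, // placeholder credential
	resourcePath: 'downloads/latest-report.json',
	overwriteExisting: true,
	// Only text content up to 1MB is echoed into the conversation (MAX_CONTENT_FOR_MESSAGES in tool.ts);
	// binary downloads are saved to the data source but never included in messages.
	includeInMessages: true,
	timeout: 30_000,
};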
+/** + * Content type detection result + */ +export interface ContentTypeInfo { + /** MIME type from response headers */ + mimeType: string; + /** Detected content type category */ + contentType: 'text' | 'image' | 'binary'; + /** File extension derived from MIME type */ + fileExtension?: string; + /** Whether detected type matches file path extension */ + extensionMatch: boolean; + /** Warning message if types don't match */ + warningMessage?: string; +} + +/** + * HTTP response metadata + */ +export interface ResponseMetadata { + /** HTTP status code */ + status: number; + /** HTTP status text */ + statusText: string; + /** Response headers */ + headers: Record; + /** Final URL after redirects */ + finalUrl: string; + /** Number of redirects followed */ + redirectCount: number; + /** Content length from headers */ + contentLength?: number; + /** Content type info */ + contentTypeInfo: ContentTypeInfo; +} + +/** + * Result data for downloadResource tool + */ +export interface LLMToolDownloadResourceResultData { + /** Original request URL */ + url: string; + /** HTTP method used */ + method: string; + /** Path where content was saved */ + resourcePath: string; + /** Resource ID/URI */ + resourceId: string; + /** Response metadata */ + response: ResponseMetadata; + /** Size of downloaded content */ + bytesDownloaded: number; + /** Download duration in milliseconds */ + durationMs: number; + /** Whether this was a new resource */ + isNewResource: boolean; + /** Data source information */ + dataSource: { + dsConnectionId: string; + dsConnectionName: string; + dsProviderType: string; + }; + /** Content for conversation inclusion (if requested) */ + conversationContent?: string; +} diff --git a/api/src/llms/tools/editResource.tool/formatter.browser.tsx b/api/src/llms/tools/editResource.tool/formatter.browser.tsx index d3db4e88..133e0b28 100644 --- a/api/src/llms/tools/editResource.tool/formatter.browser.tsx +++ b/api/src/llms/tools/editResource.tool/formatter.browser.tsx @@ -278,8 +278,8 @@ export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLog export const formatLogEntryToolResult = ( resultContent: CollaborationLogEntryContentToolResult, ): LLMToolLogEntryFormattedResult => { - const { toolResult, bbResponse } = resultContent as LLMToolEditResourceResult; - const results = getContentArrayFromToolResult(toolResult); + const { toolResults, bbResponse } = resultContent as LLMToolEditResourceResult; + const results = getContentArrayFromToolResult(toolResults); // Safely access bbResponse.data with fallbacks const responseData = bbResponse?.data; diff --git a/api/src/llms/tools/editResource.tool/formatter.console.ts b/api/src/llms/tools/editResource.tool/formatter.console.ts index 238fb593..48b11ea4 100644 --- a/api/src/llms/tools/editResource.tool/formatter.console.ts +++ b/api/src/llms/tools/editResource.tool/formatter.console.ts @@ -236,8 +236,8 @@ export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLog export const formatLogEntryToolResult = ( resultContent: CollaborationLogEntryContentToolResult, ): LLMToolLogEntryFormattedResult => { - const { toolResult, bbResponse } = resultContent as LLMToolEditResourceResult; - const results = getContentArrayFromToolResult(toolResult); + const { toolResults, bbResponse } = resultContent as LLMToolEditResourceResult; + const results = getContentArrayFromToolResult(toolResults); // Safely access bbResponse.data with fallbacks const responseData = bbResponse?.data; diff --git 
a/api/src/llms/tools/editResource.tool/tests/blocks.test.ts b/api/src/llms/tools/editResource.tool/tests/blocks.test.ts new file mode 100644 index 00000000..11345b53 --- /dev/null +++ b/api/src/llms/tools/editResource.tool/tests/blocks.test.ts @@ -0,0 +1,118 @@ +import { assert, assertEquals, assertRejects, assertStringIncludes } from 'api/tests/deps.ts'; +import type { LLMAnswerToolUse, LLMMessageContentPartTextBlock } from 'api/llms/llmMessage.ts'; +import { makeOrchestratorControllerStub } from 'api/tests/stubs.ts'; +import { + createTestInteraction, + getProjectEditor, + getTestProvider, + getToolManager, + withTestProject, +} from 'api/tests/testSetup.ts'; +import type { DataSourceProviderType } from 'shared/types/dataSource.ts'; +import { isEditResourceResponse } from '../types.ts'; + +/** + * Block editing tests for EditResource tool + * Tests structured content editing functionality + */ + +Deno.test({ + name: 'EditResourceTool - Blocks - Block capability validation', + fn: async () => { + await withTestProject(async (testProjectId, _testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + const interaction = await createTestInteraction('test-collaboration', 'test-interaction', projectEditor); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('edit_resource'); + assert(tool, 'Failed to get edit_resource tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + Promise.resolve() + ); + try { + // Test block editing on filesystem datasource (should fail) + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-block-capability', + toolName: 'edit_resource', + toolInput: { + // No dataSourceId means primary (filesystem) + resourcePath: 'test.txt', + operations: [ + { + editType: 'blocks', + blocks_operationType: 'update', + blocks_index: 0, + blocks_content: { + _type: 'block', + style: 'normal', + children: [{ _type: 'span', text: 'test', marks: [] }], + }, + }, + ], + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + // console.log('Blocks - Block capability validation - bbResponse:', result.bbResponse); + // console.log('Blocks - Block capability validation - toolResponse:', result.toolResponse); + // console.log('Blocks - Block capability validation - toolResults:', result.toolResults); + + // Verify structured bbResponse + assert( + result.bbResponse && typeof result.bbResponse === 'object', + 'bbResponse should be an object', + ); + assert( + isEditResourceResponse(result.bbResponse), + 'bbResponse should have the correct structure for EditResource', + ); + + if (isEditResourceResponse(result.bbResponse)) { + assertEquals( + result.bbResponse.data.resourcePath, + 'test.txt', + `Response resourcePath should be "test.txt"`, + ); + assertEquals( + result.bbResponse.data.operationResults?.[0]?.editType, + 'blocks', + 'Response editType should be "blocks"', + ); + assertEquals( + result.bbResponse.data.operationResults?.[0]?.message, + 'Filesystem does not support block operations', + 'message should be indicate lack of blocks support', + ); + assertEquals( + result.bbResponse.data.operationsApplied, + 1, + 'Response operationsApplied should be 1', + ); + assertEquals( + result.bbResponse.data.operationsSuccessful, + 0, + 'Response operationsSuccessful 
should be 0', + ); + } + + // Check that both delete operations were processed + assert(Array.isArray(result.toolResults), 'toolResults should be an array'); + // Should have: datasource info + summary + 2 operation results + assert(result.toolResults.length >= 3, 'toolResults should have at least 3 elements for 1 operations'); + + // Verify operation count + const summaryResult = result.toolResults[1] as LLMMessageContentPartTextBlock; + assertStringIncludes(summaryResult.text, 'Some operations skipped or failed. 0/1 operations succeeded'); + } finally { + logChangeAndCommitStub.restore(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); diff --git a/api/src/llms/tools/editResource.tool/tests/formatter.test.ts b/api/src/llms/tools/editResource.tool/tests/formatter.test.ts index 45edb863..5e70b2c0 100644 --- a/api/src/llms/tools/editResource.tool/tests/formatter.test.ts +++ b/api/src/llms/tools/editResource.tool/tests/formatter.test.ts @@ -148,7 +148,7 @@ Deno.test({ name: 'EditResourceTool - Console Formatter - Tool result successful', fn: () => { const resultContent = { - toolResult: [ + toolResults: [ { type: 'text' as const, text: 'Searched data source: local [primary]' }, { type: 'text' as const, @@ -197,7 +197,7 @@ Deno.test({ name: 'EditResourceTool - Console Formatter - Tool result with warnings', fn: () => { const resultContent = { - toolResult: [ + toolResults: [ { type: 'text' as const, text: 'Operated on data source: test-notion [notion-id]' }, { type: 'text' as const, @@ -327,7 +327,7 @@ Deno.test({ name: 'EditResourceTool - Browser Formatter - Tool result', fn: () => { const resultContent = { - toolResult: [ + toolResults: [ { type: 'text' as const, text: 'Searched data source: local [primary]' }, { type: 'text' as const, text: '✅ Operation 1 completed successfully' }, ], diff --git a/api/src/llms/tools/editResource.tool/tests/multiDatasource.test.ts b/api/src/llms/tools/editResource.tool/tests/multiDatasource.test.ts new file mode 100644 index 00000000..efe68f3e --- /dev/null +++ b/api/src/llms/tools/editResource.tool/tests/multiDatasource.test.ts @@ -0,0 +1,83 @@ +import { assert, assertEquals, assertRejects, assertStringIncludes } from 'api/tests/deps.ts'; +import type { LLMAnswerToolUse, LLMMessageContentPartTextBlock } from 'api/llms/llmMessage.ts'; +import { makeOrchestratorControllerStub } from 'api/tests/stubs.ts'; +import { createTestInteraction, getProjectEditor, getToolManager, withTestProject } from 'api/tests/testSetup.ts'; +import type { DataSourceProviderType } from 'shared/types/dataSource.ts'; +import { isEditResourceResponse } from '../types.ts'; + +/** + * Multi-datasource tests for EditResource tool + * Tests functionality across different data source types using extraDatasources parameter + */ + +Deno.test({ + name: 'EditResourceTool - MultiDatasource - Search and replace on filesystem', + fn: async () => { + await withTestProject(async (testProjectId, testProjectRoot) => { + const projectEditor = await getProjectEditor(testProjectId); + const orchestratorControllerStubMaker = makeOrchestratorControllerStub( + projectEditor.orchestratorController, + ); + const interaction = await createTestInteraction('test-collaboration', 'test-interaction', projectEditor); + + const toolManager = await getToolManager(projectEditor); + const tool = await toolManager.getTool('edit_resource'); + assert(tool, 'Failed to get edit_resource tool'); + + const logChangeAndCommitStub = orchestratorControllerStubMaker.logChangeAndCommitStub(() => + 
Promise.resolve() + ); + try { + // Create a test file in filesystem datasource + const testResource = 'multi-ds-filesystem-test.txt'; + const testResourcePath = `${testProjectRoot}/${testResource}`; + await Deno.writeTextFile(testResourcePath, 'Hello from filesystem!'); + + const toolUse: LLMAnswerToolUse = { + toolValidation: { validated: true, results: '' }, + toolUseId: 'test-multi-ds-filesystem', + toolName: 'edit_resource', + toolInput: { + // No dataSourceId means primary (filesystem) + resourcePath: testResource, + operations: [ + { + editType: 'searchReplace', + searchReplace_search: 'filesystem', + searchReplace_replace: 'local storage', + }, + ], + }, + }; + + const result = await tool.runTool(interaction, toolUse, projectEditor); + + // Verify search and replace worked on filesystem + assertStringIncludes(result.toolResponse, 'All operations succeeded'); + + // Verify structured bbResponse + assert( + result.bbResponse && typeof result.bbResponse === 'object', + 'bbResponse should be an object', + ); + assert( + isEditResourceResponse(result.bbResponse), + 'bbResponse should have the correct EditResource structure', + ); + + if (isEditResourceResponse(result.bbResponse)) { + assertEquals(result.bbResponse.data.operationResults?.[0]?.editType, 'searchReplace'); + assertEquals(result.bbResponse.data.operationsSuccessful, 1); + } + + // Verify the file was actually changed + const updatedContent = await Deno.readTextFile(testResourcePath); + assertEquals(updatedContent, 'Hello from local storage!'); + } finally { + logChangeAndCommitStub.restore(); + } + }); + }, + sanitizeResources: false, + sanitizeOps: false, +}); diff --git a/api/src/llms/tools/editResource.tool/tool.ts b/api/src/llms/tools/editResource.tool/tool.ts index 1ec2091a..71e21ee2 100644 --- a/api/src/llms/tools/editResource.tool/tool.ts +++ b/api/src/llms/tools/editResource.tool/tool.ts @@ -23,6 +23,8 @@ import type { } from 'api/errors/error.ts'; import { isResourceNotFoundError } from 'api/errors/error.ts'; import { logger } from 'shared/logger.ts'; +import { checkDatasourceAccess } from 'api/utils/featureAccess.ts'; +import { enhanceDatasourceError } from '../../../utils/datasourceErrorEnhancement.ts'; import type { SearchReplaceOperation } from 'shared/types/dataSourceResource.ts'; @@ -52,13 +54,23 @@ export default class LLMToolEditResource extends LLMTool { operations: { type: 'array', description: - 'Array of editing operations to apply in sequence. Each operation must specify its editType.', + `Array of editing operations to apply in sequence. Each operation must specify its editType. + +**CRITICAL WORKFLOW:** Before editing, ALWAYS: +1. Load resource with load_resources using contentFormat="structured" +2. Review current structure/indices for range operations or array data for cell operations +3. Plan operations using actual values from loaded content +4. 
Apply operations using this tool + +**For Google Sheets:** Load with structured format to see tabular arrays for A1 notation +**For Google Docs:** Load with structured format to see character indices for range operations +**For Files:** Load first to see current content before making changes`, items: { type: 'object', properties: { editType: { type: 'string', - enum: ['searchReplace', 'range', 'blocks', 'structuredData'], + enum: ['searchReplace', 'range', 'blocks', 'structuredData', 'cell'], description: 'Type of edit operation to perform.', }, // Search and replace properties @@ -109,7 +121,8 @@ export default class LLMToolEditResource extends LLMTool { properties: { index: { type: 'number', - description: 'Character index for insertion (0-based)', + description: + 'Character index for insertion (1-based: index 1 = first character). IMPORTANT: Google Docs API uses 1-based indexing. Load resource with structured format first to see current indices.', }, tabId: { type: 'string', @@ -125,11 +138,13 @@ export default class LLMToolEditResource extends LLMTool { properties: { startIndex: { type: 'number', - description: 'Start character index (0-based, inclusive)', + description: + 'Start character index (1-based, inclusive: index 1 = first character). IMPORTANT: Google Docs API uses 1-based indexing.', }, endIndex: { type: 'number', - description: 'End character index (0-based, exclusive)', + description: + 'End character index (1-based, exclusive: index after last character). IMPORTANT: Google Docs API uses 1-based indexing.', }, tabId: { type: 'string', @@ -266,6 +281,142 @@ export default class LLMToolEditResource extends LLMTool { description: 'Structured data operation details (structuredData only, future implementation).', }, + // Cell operation properties (for spreadsheet editing) + cell_operationType: { + type: 'string', + enum: [ + 'setValue', + 'setFormula', + 'clear', + 'format', + 'insertRows', + 'insertColumns', + 'deleteRows', + 'deleteColumns', + ], + description: 'Type of cell operation to perform (cell only).', + }, + cell_range: { + type: 'string', + description: + 'Cell range in A1 notation (cell only). Examples: "A1", "A1:B5", "Sheet1!A1:C10".', + }, + cell_values: { + type: 'array', + description: + '2D array of values for setValue operations (cell only). Example: [["Header1", "Header2"], ["Value1", "Value2"]].', + items: { + type: 'array', + items: {}, + }, + }, + cell_formula: { + type: 'string', + description: + 'Formula for setFormula operations (cell only). Must start with = (e.g., "=SUM(A1:A10)").', + }, + cell_format: { + type: 'object', + description: + 'Cell formatting options (cell only, for format operations). **BEST PRACTICE:** Use data-type-specific formatting on homogeneous ranges rather than generic formatting on mixed data types. Apply number formats that match column content (currency, dates, percentages). 
Use smaller, targeted ranges (like "B2:C5") instead of large mixed ranges (like "A1:N10").', + properties: { + numberFormat: { + type: 'string', + description: 'Number format pattern (e.g., "0.00", "$#,##0.00")', + }, + backgroundColor: { + type: 'string', + description: 'Background color as hex code (e.g., "#FFFF00")', + }, + color: { type: 'string', description: 'Text color as hex code (e.g., "#FF0000")' }, + horizontalAlignment: { + type: 'string', + enum: ['LEFT', 'CENTER', 'RIGHT'], + description: 'Horizontal text alignment', + }, + verticalAlignment: { + type: 'string', + enum: ['TOP', 'MIDDLE', 'BOTTOM'], + description: 'Vertical text alignment', + }, + bold: { type: 'boolean', description: 'Bold text formatting' }, + italic: { type: 'boolean', description: 'Italic text formatting' }, + strikethrough: { type: 'boolean', description: 'Strikethrough text formatting' }, + underline: { type: 'boolean', description: 'Underline text formatting' }, + fontFamily: { type: 'string', description: 'Font family name' }, + fontSize: { type: 'number', description: 'Font size in points' }, + borders: { + type: 'object', + description: 'Cell border formatting', + properties: { + top: { + type: 'object', + description: 'Top border style', + properties: { + style: { + type: 'string', + enum: ['NONE', 'SOLID', 'DASHED', 'DOTTED', 'DOUBLE'], + description: 'Border line style', + }, + color: { + type: 'string', + description: 'Border color as hex code (e.g., "#000000")', + }, + }, + }, + bottom: { + type: 'object', + description: 'Bottom border style', + properties: { + style: { + type: 'string', + enum: ['NONE', 'SOLID', 'DASHED', 'DOTTED', 'DOUBLE'], + description: 'Border line style', + }, + color: { + type: 'string', + description: 'Border color as hex code (e.g., "#000000")', + }, + }, + }, + left: { + type: 'object', + description: 'Left border style', + properties: { + style: { + type: 'string', + enum: ['NONE', 'SOLID', 'DASHED', 'DOTTED', 'DOUBLE'], + description: 'Border line style', + }, + color: { + type: 'string', + description: 'Border color as hex code (e.g., "#000000")', + }, + }, + }, + right: { + type: 'object', + description: 'Right border style', + properties: { + style: { + type: 'string', + enum: ['NONE', 'SOLID', 'DASHED', 'DOTTED', 'DOUBLE'], + description: 'Border line style', + }, + color: { + type: 'string', + description: 'Border color as hex code (e.g., "#000000")', + }, + }, + }, + }, + }, + }, + }, + cell_sheetName: { + type: 'string', + description: 'Target sheet name (cell only). 
Required for multi-sheet spreadsheets.', + }, }, required: ['editType'], }, @@ -340,10 +491,28 @@ export default class LLMToolEditResource extends LLMTool { const dataSourceRoot = dsConnectionToUse.getDataSourceRoot(); + // Check datasource write access + const hasWriteAccess = await checkDatasourceAccess( + projectEditor.userContext, + dsConnectionToUse.providerType, + 'write', + ); + if (!hasWriteAccess) { + throw createError( + ErrorType.ToolHandling, + `Write access for ${dsConnectionToUse.providerType} not available on your current plan`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'capability-check', + } as ToolHandlingErrorOptions, + ); + } + // Get resource accessor const resourceAccessor = await dsConnectionToUse.getResourceAccessor(); - if (!resourceAccessor.writeResource) { - throw createError(ErrorType.ToolHandling, `No writeResource method on resourceAccessor`, { + if (!resourceAccessor.editResource) { + throw createError(ErrorType.ToolHandling, `No editResource method on resourceAccessor`, { name: 'edit-resource', toolName: 'edit_resource', operation: 'interface-check', @@ -355,12 +524,14 @@ export default class LLMToolEditResource extends LLMTool { `LLMToolEditResource: Applying ${operations.length} operations to resource: ${resourcePath}`, ); + /* if (!resourceAccessor.editResource) { throw createError(ErrorType.ToolHandling, `No writeResource method on resourceAccessor`, { toolName: 'write_resource', operation: 'tool-run', } as ToolHandlingErrorOptions); } + */ // Validate resource path is within datasource const resourceUri = (resourcePath.includes('://') || resourcePath.startsWith('file:')) ? dsConnectionToUse.getUriForResource(resourcePath) // Already a URI, use as is @@ -454,12 +625,22 @@ export default class LLMToolEditResource extends LLMTool { return { toolResults, toolResponse, bbResponse }; } catch (error) { - const errorMessage = `Failed to apply unified edit operations to ${resourcePath}: ${ + const originalErrorMessage = `Failed to apply unified edit operations to ${resourcePath}: ${ (error as Error).message }`; - logger.error(`LLMToolEditResource: ${errorMessage}`); - throw createError(ErrorType.ResourceHandling, errorMessage, { + // Enhance error message with datasource-specific guidance + const enhancedErrorMessage = enhanceDatasourceError( + originalErrorMessage, + dsConnectionToUse.provider, + 'edit', + resourcePath, + interaction, + ); + + logger.error(`LLMToolEditResource: ${enhancedErrorMessage}`); + + throw createError(ErrorType.ResourceHandling, enhancedErrorMessage, { name: 'edit-resource', filePath: resourcePath, operation: 'edit', @@ -522,7 +703,7 @@ export default class LLMToolEditResource extends LLMTool { ); } - const validEditTypes = ['searchReplace', 'range', 'blocks', 'structuredData']; + const validEditTypes = ['searchReplace', 'range', 'blocks', 'structuredData', 'cell']; if (!validEditTypes.includes(operation.editType)) { throw createError( ErrorType.ToolHandling, @@ -535,6 +716,91 @@ export default class LLMToolEditResource extends LLMTool { ); } + // 🚨 CLAUDE 4.5 INPUT CORRECTION SAFEGUARDS 🚨 + // Claude 4.5 frequently ignores tool schemas and uses incorrect property names + // This correction logic fixes the LLM's mistakes automatically + const keyCorrections: Record> = { + searchReplace: { + 'search': 'searchReplace_search', + 'replace': 'searchReplace_replace', + 'regexPattern': 'searchReplace_regexPattern', + 'replaceAll': 'searchReplace_replaceAll', + 'caseSensitive': 'searchReplace_caseSensitive', + }, + 
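+ // Hypothetical example of the correction this map drives: an input like { editType: 'searchReplace', search: 'foo', replace: 'bar' }
+ // is rewritten by the loop below to { editType: 'searchReplace', searchReplace_search: 'foo', searchReplace_replace: 'bar' },
+ // copying each bare key to its prefixed form and deleting the original.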
range: { + 'rangeType': 'range_rangeType', + 'location': 'range_location', + 'range': 'range_range', + 'text': 'range_text', + 'textStyle': 'range_textStyle', + 'paragraphStyle': 'range_paragraphStyle', + 'fields': 'range_fields', + }, + blocks: { + 'operationType': 'blocks_operationType', + 'index': 'blocks_index', + 'key': 'blocks_key', + 'content': 'blocks_content', + 'position': 'blocks_position', + 'block': 'blocks_block', + 'from': 'blocks_from', + 'to': 'blocks_to', + 'fromKey': 'blocks_fromKey', + 'toPosition': 'blocks_toPosition', + }, + structuredData: { + 'operation': 'structuredData_operation', + }, + cell: { + 'operationType': 'cell_operationType', + 'range': 'cell_range', + 'values': 'cell_values', + 'formula': 'cell_formula', + 'format': 'cell_format', + 'sheetName': 'cell_sheetName', + }, + }; + + // Apply corrections for LLM mistakes + const corrections = keyCorrections[operation.editType]; + if (corrections) { + const correctedKeys: string[] = []; + for (const [incorrectKey, correctKey] of Object.entries(corrections)) { + if ( + Object.prototype.hasOwnProperty.call(operation, incorrectKey) && + !Object.prototype.hasOwnProperty.call(operation, correctKey) + ) { + // 🚨 LOUD LOGGING - BLAME THE LLM 🚨 + logger.error( + `🚨🚨🚨 LLM INPUT SCHEMA VIOLATION DETECTED 🚨🚨🚨`, + ); + logger.error( + `❌ CLAUDE 4.5 FAILED TO FOLLOW TOOL SCHEMA in operation ${index + 1}`, + ); + logger.error( + `❌ LLM used INCORRECT key: '${incorrectKey}' (should be '${correctKey}')`, + ); + logger.error( + `🔧 AUTOMATICALLY CORRECTING LLM'S MISTAKE...`, + ); + + // Apply the correction + operation[correctKey] = operation[incorrectKey]; + delete operation[incorrectKey]; + correctedKeys.push(`${incorrectKey} → ${correctKey}`); + } + } + + if (correctedKeys.length > 0) { + logger.error( + `✅ CORRECTED ${correctedKeys.length} LLM SCHEMA VIOLATIONS: [${correctedKeys.join(', ')}]`, + ); + logger.error( + `⚠️ THIS IS A CLAUDE 4.5 BUG - LLM SHOULD FOLLOW THE PROVIDED SCHEMA EXACTLY`, + ); + } + } + // Validate operations only use properties with matching prefixes const expectedPrefix = `${operation.editType}_`; const otherPrefixes = validEditTypes.filter((type) => type !== operation.editType).map((type) => @@ -562,17 +828,20 @@ export default class LLMToolEditResource extends LLMTool { ); } else { // Invalid property name (no recognized prefix) - throw createError( - ErrorType.ToolHandling, - `Operation ${ - index + 1 - }: invalid property '${prop}' for editType '${operation.editType}' (expected prefix: '${expectedPrefix}')`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, + logger.error( + `LLMToolEditResource: Invalid property ${prop} for operation ${operation.editType} - should start with ${expectedPrefix}`, ); + // throw createError( + // ErrorType.ToolHandling, + // `Operation ${ + // index + 1 + // }: invalid property '${prop}' for editType '${operation.editType}' (expected prefix: '${expectedPrefix}')`, + // { + // name: 'edit-resource', + // toolName: 'edit_resource', + // operation: 'tool-input', + // } as ToolHandlingErrorOptions, + // ); } } } @@ -588,260 +857,386 @@ export default class LLMToolEditResource extends LLMTool { private validateOperationProperties(operation: any, operationNumber: number): void { switch (operation.editType) { case 'searchReplace': - if (typeof operation.searchReplace_search !== 'string') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: searchReplace_search must be a string`, 
- { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if (typeof operation.searchReplace_replace !== 'string') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: searchReplace_replace must be a string`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); + { + if (typeof operation.searchReplace_search !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: searchReplace_search must be a string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if (typeof operation.searchReplace_replace !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: searchReplace_replace must be a string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } } break; case 'range': - if (!operation.range_rangeType || typeof operation.range_rangeType !== 'string') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_rangeType must be a string`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - const validRangeTypes = [ - 'insertText', - 'deleteRange', - 'replaceRange', - 'updateTextStyle', - 'updateParagraphStyle', - ]; - if (!validRangeTypes.includes(operation.range_rangeType)) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_rangeType must be one of ${validRangeTypes.join(', ')}`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } + { + if (!operation.range_rangeType || typeof operation.range_rangeType !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_rangeType must be a string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + const validRangeTypes = [ + 'insertText', + 'deleteRange', + 'replaceRange', + 'updateTextStyle', + 'updateParagraphStyle', + ]; + if (!validRangeTypes.includes(operation.range_rangeType)) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_rangeType must be one of ${ + validRangeTypes.join(', ') + }`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } - // Validate required properties based on range type - switch (operation.range_rangeType) { - case 'insertText': - if (!operation.range_location || typeof operation.range_location !== 'object') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: insertText requires range_location object`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if (typeof operation.range_location.index !== 'number') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_location.index must be a number`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if (!operation.range_text || typeof operation.range_text !== 'string') { - throw createError( - ErrorType.ToolHandling, - `Operation 
${operationNumber}: insertText requires range_text string`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - break; - case 'deleteRange': - if (!operation.range_range || typeof operation.range_range !== 'object') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: deleteRange requires range_range object`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if ( - typeof operation.range_range.startIndex !== 'number' || - typeof operation.range_range.endIndex !== 'number' - ) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_range.startIndex and endIndex must be numbers`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - break; - case 'replaceRange': - if (!operation.range_range || typeof operation.range_range !== 'object') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: replaceRange requires range_range object`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if ( - typeof operation.range_range.startIndex !== 'number' || - typeof operation.range_range.endIndex !== 'number' - ) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_range.startIndex and endIndex must be numbers`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if (!operation.range_text || typeof operation.range_text !== 'string') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: replaceRange requires range_text string`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - break; - case 'updateTextStyle': - case 'updateParagraphStyle': - if (!operation.range_range || typeof operation.range_range !== 'object') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: ${operation.range_rangeType} requires range_range object`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if ( - typeof operation.range_range.startIndex !== 'number' || - typeof operation.range_range.endIndex !== 'number' - ) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_range.startIndex and endIndex must be numbers`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - // Style validation is optional - if provided, must be objects - if ( - operation.range_rangeType === 'updateTextStyle' && operation.range_textStyle && - typeof operation.range_textStyle !== 'object' - ) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_textStyle must be an object`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - if ( - operation.range_rangeType === 'updateParagraphStyle' && operation.range_paragraphStyle && - typeof operation.range_paragraphStyle !== 'object' - ) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: range_paragraphStyle must be an object`, - { - name: 'edit-resource', - 
toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); + // Validate required properties based on range type + switch (operation.range_rangeType) { + case 'insertText': + { + if (!operation.range_location || typeof operation.range_location !== 'object') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: insertText requires range_location object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if (typeof operation.range_location.index !== 'number') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_location.index must be a number`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if (!operation.range_text || typeof operation.range_text !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: insertText requires range_text string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + } + break; + case 'deleteRange': + { + if (!operation.range_range || typeof operation.range_range !== 'object') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: deleteRange requires range_range object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if ( + typeof operation.range_range.startIndex !== 'number' || + typeof operation.range_range.endIndex !== 'number' + ) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_range.startIndex and endIndex must be numbers`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + } + break; + case 'replaceRange': + { + if (!operation.range_range || typeof operation.range_range !== 'object') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: replaceRange requires range_range object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if ( + typeof operation.range_range.startIndex !== 'number' || + typeof operation.range_range.endIndex !== 'number' + ) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_range.startIndex and endIndex must be numbers`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if (!operation.range_text || typeof operation.range_text !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: replaceRange requires range_text string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + } + break; + case 'updateTextStyle': + case 'updateParagraphStyle': { + if (!operation.range_range || typeof operation.range_range !== 'object') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: ${operation.range_rangeType} requires range_range object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if ( + typeof operation.range_range.startIndex !== 'number' || + typeof operation.range_range.endIndex !== 'number' + ) { + throw 
createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_range.startIndex and endIndex must be numbers`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + // Style validation is optional - if provided, must be objects + if ( + operation.range_rangeType === 'updateTextStyle' && operation.range_textStyle && + typeof operation.range_textStyle !== 'object' + ) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_textStyle must be an object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if ( + operation.range_rangeType === 'updateParagraphStyle' && + operation.range_paragraphStyle && + typeof operation.range_paragraphStyle !== 'object' + ) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: range_paragraphStyle must be an object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + break; } - break; + } } break; case 'blocks': - if (!operation.blocks_operationType || typeof operation.blocks_operationType !== 'string') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: blocks_operationType must be a string`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); - } - const validBlockTypes = ['update', 'insert', 'delete', 'move']; - if (!validBlockTypes.includes(operation.blocks_operationType)) { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: blocks_operationType must be one of ${ - validBlockTypes.join(', ') - }`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); + { + if (!operation.blocks_operationType || typeof operation.blocks_operationType !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: blocks_operationType must be a string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + const validBlockTypes = ['update', 'insert', 'delete', 'move']; + if (!validBlockTypes.includes(operation.blocks_operationType)) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: blocks_operationType must be one of ${ + validBlockTypes.join(', ') + }`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } } break; case 'structuredData': - // Minimal validation for structured data (future implementation) - if (!operation.structuredData_operation || typeof operation.structuredData_operation !== 'object') { - throw createError( - ErrorType.ToolHandling, - `Operation ${operationNumber}: structuredData_operation must be an object`, - { - name: 'edit-resource', - toolName: 'edit_resource', - operation: 'tool-input', - } as ToolHandlingErrorOptions, - ); + { + // Minimal validation for structured data (future implementation) + if (!operation.structuredData_operation || typeof operation.structuredData_operation !== 'object') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: structuredData_operation must be an object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + } 
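+ // structuredData currently gets only this minimal shape check; per the schema description above it is a future implementation.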
+ break; + case 'cell': + { + // Validate cell operations for spreadsheet editing + if (!operation.cell_operationType || typeof operation.cell_operationType !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: cell_operationType must be a string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + const validCellOperations = [ + 'setValue', + 'setFormula', + 'clear', + 'format', + 'insertRows', + 'insertColumns', + 'deleteRows', + 'deleteColumns', + ]; + if (!validCellOperations.includes(operation.cell_operationType)) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: cell_operationType must be one of ${ + validCellOperations.join(', ') + }`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + // Validate cell_range is provided + if (!operation.cell_range || typeof operation.cell_range !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: cell_range must be a string in A1 notation`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + // Operation-specific validation + switch (operation.cell_operationType) { + case 'setValue': + if (!operation.cell_values || !Array.isArray(operation.cell_values)) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: setValue requires cell_values array`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + break; + case 'setFormula': + if (!operation.cell_formula || typeof operation.cell_formula !== 'string') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: setFormula requires cell_formula string`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + if (!operation.cell_formula.startsWith('=')) { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: cell_formula must start with = (e.g., "=SUM(A1:A10)")`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + break; + case 'format': + if (!operation.cell_format || typeof operation.cell_format !== 'object') { + throw createError( + ErrorType.ToolHandling, + `Operation ${operationNumber}: format requires cell_format object`, + { + name: 'edit-resource', + toolName: 'edit_resource', + operation: 'tool-input', + } as ToolHandlingErrorOptions, + ); + } + break; + // Other cell operations (clear, insertRows, deleteColumns, etc.) 
have minimal requirements + default: + // cell_range is already validated above + break; + } } break; } diff --git a/api/src/llms/tools/editResource.tool/types.ts b/api/src/llms/tools/editResource.tool/types.ts index 59802404..8d641664 100644 --- a/api/src/llms/tools/editResource.tool/types.ts +++ b/api/src/llms/tools/editResource.tool/types.ts @@ -101,7 +101,7 @@ export interface LLMToolEditResourceResponseData { * Complete result for edit_resource tool (updated to match writeResource pattern) */ export interface LLMToolEditResourceResult { - toolResult: LLMToolRunResultContent; + toolResults: LLMToolRunResultContent; bbResponse: LLMToolEditResourceResponseData; } diff --git a/api/src/llms/tools/fetchWebPage.tool/formatter.browser.tsx b/api/src/llms/tools/fetchWebPage.tool/formatter.browser.tsx index caa5ea5c..8c2c3c1f 100644 --- a/api/src/llms/tools/fetchWebPage.tool/formatter.browser.tsx +++ b/api/src/llms/tools/fetchWebPage.tool/formatter.browser.tsx @@ -25,11 +25,11 @@ export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLog export const formatLogEntryToolResult = ( resultContent: CollaborationLogEntryContentToolResult, ): LLMToolLogEntryFormattedResult => { - const { toolResult, bbResponse } = resultContent as LLMToolFetchWebPageResult; + const { toolResults, bbResponse } = resultContent as LLMToolFetchWebPageResult; if (typeof bbResponse === 'object' && 'data' in bbResponse) { const { url } = bbResponse.data; - const content = getContentFromToolResult(toolResult); + const content = getContentFromToolResult(toolResults); const contentPreview = content.length > 500 ? content.slice(0, 500) + '...' : content; const contentElement = LLMTool.TOOL_TAGS_BROWSER.base.container( diff --git a/api/src/llms/tools/fetchWebPage.tool/formatter.console.ts b/api/src/llms/tools/fetchWebPage.tool/formatter.console.ts index ec1e57a5..8d9eb33e 100644 --- a/api/src/llms/tools/fetchWebPage.tool/formatter.console.ts +++ b/api/src/llms/tools/fetchWebPage.tool/formatter.console.ts @@ -23,11 +23,11 @@ export const formatLogEntryToolUse = (toolInput: LLMToolInputSchema): LLMToolLog export const formatLogEntryToolResult = ( resultContent: CollaborationLogEntryContentToolResult, ): LLMToolLogEntryFormattedResult => { - const { toolResult, bbResponse } = resultContent as LLMToolFetchWebPageResult; + const { toolResults, bbResponse } = resultContent as LLMToolFetchWebPageResult; if (typeof bbResponse === 'object' && 'data' in bbResponse) { const { url } = bbResponse.data; - const content = getContentFromToolResult(toolResult); + const content = getContentFromToolResult(toolResults); const contentPreview = content.length > 500 ? content.slice(0, 500) + '...' : content; const formattedContent = stripIndents` diff --git a/api/src/llms/tools/fetchWebPage.tool/info.json b/api/src/llms/tools/fetchWebPage.tool/info.json index 4153f8fe..94caf5b3 100644 --- a/api/src/llms/tools/fetchWebPage.tool/info.json +++ b/api/src/llms/tools/fetchWebPage.tool/info.json @@ -1,7 +1,9 @@ { "name": "fetch_web_page", - "description": "Fetches the content of a specified web page. Returns the text content of HTML pages, stripping scripts and styles. For visual content, use fetch_web_screenshot instead. Some sites may block automated access or require authentication. Response size may be limited.", + "description": "Fetches web page content using a full headless browser for complete content hydration including JavaScript, dynamic loading, and complex authentication flows. 
Returns clean text content with scripts and styles removed. Use this when pages require browser context (e.g., GitHub pages, SPAs, dynamic content). For static resources, direct API calls, or raw files, use download_resource instead. For visual content, use fetch_web_screenshot.", "version": "1.0.0", "author": "BB Team", - "license": "MIT" + "license": "MIT", + "enabled": true, + "category": "data-retrieval" } diff --git a/api/src/llms/tools/fetchWebPage.tool/tests/tool.test.ts b/api/src/llms/tools/fetchWebPage.tool/tests/tool.test.ts index 340df130..533d36f2 100644 --- a/api/src/llms/tools/fetchWebPage.tool/tests/tool.test.ts +++ b/api/src/llms/tools/fetchWebPage.tool/tests/tool.test.ts @@ -32,7 +32,7 @@ Deno.test({ const tool = await toolManager.getTool('fetch_web_page'); assert(tool, 'Failed to get tool'); - const url = 'https://google.com'; + const url = 'https://www.google.com'; const toolUse: LLMAnswerToolUse = { toolValidation: { validated: true, results: '' }, toolUseId: 'test-id', @@ -61,10 +61,14 @@ Deno.test({ ); if (isFetchWebPageResponse(result.bbResponse)) { - assert(result.bbResponse.data.html.startsWith(' + + + ${previewContent.innerHTML} + + + `); + printWindow.document.close(); + printWindow.print(); + }; + + const isEntryExpanded = (logDataEntry: CollaborationLogDataEntry, index: number): boolean => { + if (!logDataEntryHasLogEntry(logDataEntry)) return true; + + const entryType = logDataEntry.logEntry + .entryType as keyof typeof import('../utils/messageUtils.utils.tsx').defaultExpanded; + return getInitialCollapseState( + collaborationId || 'default', + logDataEntry.agentInteractionId || null, + index, + entryType, + ); + }; + + const collectMessageContent = (mode: ExportMode): string => { + if (!logDataEntries || logDataEntries.length === 0) { + return 'No conversation content to copy.'; + } + + let markdown = ''; + + // Add conversation title if available + if (collaborationTitle) { + markdown += `# ${collaborationTitle}\n\n`; + } + + // For conversation mode, consolidate sequential entries of the same type + if (mode === 'conversation') { + const consolidatedEntries: Array<{ + entryType: string; + combinedContent: string; + lastTimestamp: string; + headerTitle: string; + }> = []; + + logDataEntries.forEach((logDataEntry) => { + if (!logDataEntryHasLogEntry(logDataEntry)) { + return; // Skip conversation start entries for conversation mode + } + + const entryType = logDataEntry.logEntry.entryType; + const content = logDataEntry.logEntry.content; + const timestamp = new Date(logDataEntry.timestamp).toLocaleString(); + + // Only include user and assistant messages for conversation mode + if (!['user', 'assistant', 'answer', 'orchestrator'].includes(entryType)) { + return; + } + + // Determine header title + let headerTitle = ''; + switch (entryType) { + case 'user': + headerTitle = projectConfig?.myPersonsName || 'User'; + break; + case 'assistant': + case 'answer': + headerTitle = projectConfig?.myAssistantsName || 'Assistant'; + break; + case 'orchestrator': + headerTitle = 'Orchestrator'; + break; + default: + headerTitle = entryType.charAt(0).toUpperCase() + entryType.slice(1); + } + + const contentText = typeof content === 'string' ? 
content : JSON.stringify(content, null, 2); + + // Check if we can consolidate with the last entry + const lastEntry = consolidatedEntries[consolidatedEntries.length - 1]; + if (lastEntry && lastEntry.entryType === entryType) { + // Consolidate with previous entry + lastEntry.combinedContent += '\n\n' + contentText; + lastEntry.lastTimestamp = timestamp; // Use timestamp from last entry + } else { + // Create new consolidated entry + consolidatedEntries.push({ + entryType, + combinedContent: contentText, + lastTimestamp: timestamp, + headerTitle, + }); + } + }); + + // Generate markdown from consolidated entries + consolidatedEntries.forEach((entry, index) => { + markdown += `## ${entry.headerTitle}\n*${entry.lastTimestamp}*\n\n`; + markdown += entry.combinedContent + '\n\n'; + + // Add separator between entries (except for the last one) + if (index < consolidatedEntries.length - 1) { + markdown += '---\n\n'; + } + }); + + return markdown.trim(); + } + + // Process each log entry (for 'all' and 'displayed' modes) + logDataEntries.forEach((logDataEntry, index) => { + if (!logDataEntryHasLogEntry(logDataEntry)) { + // Handle conversation start entries + if ((mode as ExportMode) !== 'conversation') { + markdown += `---\n\n**Conversation Start**\n\n`; + } + return; + } + + const entryType = logDataEntry.logEntry.entryType; + const content = logDataEntry.logEntry.content; + const timestamp = new Date(logDataEntry.timestamp).toLocaleString(); + const isExpanded = isEntryExpanded(logDataEntry, index); + console.log('ToolBar: logDataEntry', logDataEntry); + console.log('ToolBar: content', { entryType, content }); + + // Filter based on mode + if ((mode as ExportMode) === 'conversation') { + // Only include user and assistant messages + if (!['user', 'assistant', 'answer', 'orchestrator'].includes(entryType)) { + return; + } + } else if (mode === 'displayed' && !isExpanded) { + // For collapsed entries, use formatted result preview if available, otherwise fallback + let headerTitle = ''; + let summaryText = ''; + + // Fallback to original logic when formatted data not available + if (typeof content === 'string') { + summaryText = getContentSummary(content, 100); + } else { + // For structured content, create a readable summary + if (entryType === 'tool_use' && content && typeof content === 'object') { + const toolData = content as any; + const keys = Object.keys(toolData).slice(0, 3); + if (keys.length > 0) { + const paramSummary = keys.map((key) => { + const value = toolData[key]; + if (typeof value === 'string') { + return `${key}: ${value.slice(0, 30)}${value.length > 30 ? '...' : ''}`; + } else { + return `${key}: ${typeof value}`; + } + }).join(', '); + summaryText = getContentSummary(paramSummary, 100); + } else { + summaryText = 'No parameters'; + } + } else if (entryType === 'tool_result' && content && typeof content === 'object') { + const resultData = content as any; + if (resultData.success !== undefined) { + const status = resultData.success ? 'Success' : 'Failed'; + const message = resultData.message || resultData.error || ''; + summaryText = getContentSummary(`${status}: ${message}`, 100); + } else if (resultData.content || resultData.result) { + const resultContent = String(resultData.content || resultData.result); + summaryText = getContentSummary(resultContent, 100); + } else { + const keys = Object.keys(resultData).slice(0, 3); + summaryText = keys.length > 0 ? 
`Contains: ${keys.join(', ')}` : 'Empty result'; + } + } else if (entryType === 'auxiliary' && content && typeof content === 'object') { + summaryText = (content as AuxiliaryChatContent).message; + } else { + const jsonString = JSON.stringify(content, null, 2); + summaryText = getContentSummary(jsonString, 100); + } + } + // Set header title for fallback + headerTitle = entryType === 'tool_use' + ? `Tool Input: ${logDataEntry.logEntry.toolName || 'Unknown Tool'}` + : entryType === 'tool_result' + ? `Tool Output: ${logDataEntry.logEntry.toolName || 'Unknown Tool'}` + : entryType.charAt(0).toUpperCase() + entryType.slice(1); + + // Add entry header with improved formatting + markdown += `### ${headerTitle} (collapsed)\n*${timestamp}*\n\n`; + markdown += `*${summaryText}*\n\n`; + + // Add separator + if (index < logDataEntries.length - 1) { + markdown += '---\n\n'; + } + return; + } + + // Add entry header based on type + switch (entryType) { + case 'user': + markdown += `## ${projectConfig?.myPersonsName || 'User'}\n*${timestamp}*\n\n`; + break; + case 'assistant': + case 'answer': + markdown += `## ${projectConfig?.myAssistantsName || 'Assistant'}\n*${timestamp}*\n\n`; + break; + case 'orchestrator': + markdown += `## Orchestrator\n*${timestamp}*\n\n`; + break; + case 'tool_use': + markdown += `### Tool Input: ${ + logDataEntry.logEntry.toolName || 'Unknown Tool' + }\n*${timestamp}*\n\n`; + break; + case 'tool_result': + markdown += `### Tool Output: ${ + logDataEntry.logEntry.toolName || 'Unknown Tool' + }\n*${timestamp}*\n\n`; + break; + default: + markdown += `### ${entryType.charAt(0).toUpperCase() + entryType.slice(1)}\n*${timestamp}*\n\n`; + } + + // Add content + if (typeof content === 'string') { + markdown += content + '\n\n'; + } else { + if (entryType === 'auxiliary' && content && typeof content === 'object') { + markdown += (content as AuxiliaryChatContent).message + '\n\n'; + } else { // For non-string content (like tool input/output), format as code block + markdown += '```json\n' + JSON.stringify(content, null, 2) + '\n```\n\n'; + } + } + + // Add separator between entries (except for the last one) + if (index < logDataEntries.length - 1) { + markdown += '---\n\n'; + } + }); + + return markdown.trim(); + }; + const handleAddFilesTemplate = () => { if (!chatInputRef.current) return; @@ -113,6 +623,21 @@ export function ToolBar({ onSendMessage, chatInputRef, disabled, projectId, apiC return () => window.removeEventListener('keydown', handleKeyDown); }, [showHelp, chatInputRef]); + // Handle click outside dropdown + useEffect(() => { + if (!showExportDropdown) return; + + const handleClickOutside = (event: MouseEvent) => { + const target = event.target as HTMLElement; + if (!target.closest('.export-dropdown-container')) { + setShowExportDropdown(false); + } + }; + + document.addEventListener('mousedown', handleClickOutside); + return () => document.removeEventListener('mousedown', handleClickOutside); + }, [showExportDropdown]); + return ( <>
@@ -144,6 +669,86 @@ export function ToolBar({ onSendMessage, chatInputRef, disabled, projectId, apiC + {/* Export Dropdown */} +
+ + + {/* Dropdown Menu */} + {showExportDropdown && ( +
+
+ +
+ + + +
+
+ )} +
+ {/* Divider */}
@@ -206,6 +811,113 @@ export function ToolBar({ onSendMessage, chatInputRef, disabled, projectId, apiC onClose={() => setShowHelp(false)} apiClient={apiClient} /> + + {/* Preview Overlay */} + {showPreviewOverlay && ( +
+
+ {/* Header */} +
+
+

+ {collaborationTitle ? `Preview: ${collaborationTitle}` : 'Conversation Preview'} +

+ +
+ {/* Content Type Toggles */} +
+ Content: + + + +
+
+ {/* Copy Button */} + + {/* Print Button */} + +
+
+ + {/* Content */} +
+
+
+
+
+ )} ); } diff --git a/bui/src/fresh.config.ts b/bui/src/fresh.config.ts index 6a7185e2..9beca581 100644 --- a/bui/src/fresh.config.ts +++ b/bui/src/fresh.config.ts @@ -16,7 +16,7 @@ import { buiFileLogger } from 'bui/utils/fileLogger.ts'; //import { supabaseAuthPlugin } from './plugins/supabaseAuth.ts'; //import { authPlugin } from './plugins/auth.plugin.ts'; import { stateConfigPlugin } from './plugins/stateConfig.plugin.ts'; -import type { FreshAppState } from 'bui/types/state.ts'; +//import type { FreshAppState } from 'bui/types/state.ts'; // CWD is set by `bb` in Deno.Command, or implicitly set by user if calling bb-bui directly diff --git a/bui/src/fresh.gen.ts b/bui/src/fresh.gen.ts index 4953835f..e28a3afa 100644 --- a/bui/src/fresh.gen.ts +++ b/bui/src/fresh.gen.ts @@ -7,8 +7,6 @@ import * as $_500 from './routes/_500.tsx'; import * as $_app from './routes/_app.tsx'; import * as $_middleware from './routes/_middleware.ts'; import * as $api_v1_config_supabase from './routes/api/v1/config/supabase.ts'; -import * as $api_v1_oauth_google_config from './routes/api/v1/oauth/google/config.ts'; -import * as $api_v1_oauth_google_token from './routes/api/v1/oauth/google/token.ts'; import * as $api_v1_status from './routes/api/v1/status.ts'; import * as $app_chat_index from './routes/app/chat/index.tsx'; import * as $app_chat_partial from './routes/app/chat/partial.tsx'; @@ -28,11 +26,10 @@ import * as $auth_update_password_index from './routes/auth/update-password/inde import * as $auth_verify_index from './routes/auth/verify/index.tsx'; import * as $doctor from './routes/doctor.tsx'; import * as $index from './routes/index.tsx'; -import * as $oauth_google_callback from './routes/oauth/google/callback.tsx'; +import * as $oauth_mcp_serverId_callback from './routes/oauth/mcp/[serverId]/callback.tsx'; import * as $AppSettings from './islands/AppSettings.tsx'; import * as $AppSettings_AppearanceSettings from './islands/AppSettings/AppearanceSettings.tsx'; import * as $AppSettings_DefaultProjectSettings from './islands/AppSettings/DefaultProjectSettings.tsx'; -import * as $AppSettings_MCPServerItem from './islands/AppSettings/MCPServerItem.tsx'; import * as $AppSettings_MCPServersSection from './islands/AppSettings/MCPServersSection.tsx'; import * as $AppSettings_NotificationSettings from './islands/AppSettings/NotificationSettings.tsx'; import * as $AppSettings_PlansAndCreditsTab from './islands/AppSettings/PlansAndCreditsTab.tsx'; @@ -70,8 +67,6 @@ const manifest = { './routes/_app.tsx': $_app, './routes/_middleware.ts': $_middleware, './routes/api/v1/config/supabase.ts': $api_v1_config_supabase, - './routes/api/v1/oauth/google/config.ts': $api_v1_oauth_google_config, - './routes/api/v1/oauth/google/token.ts': $api_v1_oauth_google_token, './routes/api/v1/status.ts': $api_v1_status, './routes/app/chat/index.tsx': $app_chat_index, './routes/app/chat/partial.tsx': $app_chat_partial, @@ -91,13 +86,12 @@ const manifest = { './routes/auth/verify/index.tsx': $auth_verify_index, './routes/doctor.tsx': $doctor, './routes/index.tsx': $index, - './routes/oauth/google/callback.tsx': $oauth_google_callback, + './routes/oauth/mcp/[serverId]/callback.tsx': $oauth_mcp_serverId_callback, }, islands: { './islands/AppSettings.tsx': $AppSettings, './islands/AppSettings/AppearanceSettings.tsx': $AppSettings_AppearanceSettings, './islands/AppSettings/DefaultProjectSettings.tsx': $AppSettings_DefaultProjectSettings, - './islands/AppSettings/MCPServerItem.tsx': $AppSettings_MCPServerItem, 
'./islands/AppSettings/MCPServersSection.tsx': $AppSettings_MCPServersSection, './islands/AppSettings/NotificationSettings.tsx': $AppSettings_NotificationSettings, './islands/AppSettings/PlansAndCreditsTab.tsx': $AppSettings_PlansAndCreditsTab, diff --git a/bui/src/hooks/useAppState.ts b/bui/src/hooks/useAppState.ts index 9a936613..d9dfb025 100644 --- a/bui/src/hooks/useAppState.ts +++ b/bui/src/hooks/useAppState.ts @@ -9,17 +9,20 @@ import { createWebSocketManagerApp, type WebSocketManagerApp } from '../utils/we import { type ApiClient, createApiClientManager } from '../utils/apiClient.utils.ts'; import { getApiHostname, getApiPort, getApiUseTls } from '../utils/url.utils.ts'; import { getWorkingApiUrl } from '../utils/connectionManager.utils.ts'; +import type { BuiConfig } from 'shared/config/types.ts'; +//import { BuiConfigDefaults } from 'shared/config/types.ts'; export interface AppState { systemMeta: SystemMeta | null; wsManager: WebSocketManagerApp | null; apiClient: ApiClient | null; status: WebSocketStatus; + path: string | null; error: string | null; versionInfo: VersionInfo | undefined; projectId: ProjectId | null; collaborationId: string | null; - path: string; + buiConfig: BuiConfig | null; } // Load initial state from localStorage and URL @@ -64,6 +67,7 @@ const appState = signal({ }, error: null, versionInfo: undefined, + buiConfig: null, ...loadStoredState(), }); @@ -137,22 +141,24 @@ async function initializeAppStateAsync() { }); // Initialize app state with the working URLs - initializeAppState({ - wsUrl: wsUrl, - apiUrl: apiUrl, - onMessage: (message) => { - console.log('useAppState: Received message:', message); - }, - onError: (error) => { - console.error('useAppState: WebSocket error:', error); - }, - onClose: () => { - console.log('useAppState: WebSocket closed'); - }, - onOpen: () => { - console.log('useAppState: WebSocket opened'); + initializeAppState( + { + wsUrl: wsUrl, + apiUrl: apiUrl, + onMessage: (message) => { + console.log('useAppState: Received message:', message); + }, + onError: (error) => { + console.error('useAppState: WebSocket error:', error); + }, + onClose: () => { + console.log('useAppState: WebSocket closed'); + }, + onOpen: () => { + console.log('useAppState: WebSocket opened'); + }, }, - }); + ); } catch (error) { console.error('useAppState: Failed to initialize connection:', error); @@ -190,6 +196,21 @@ async function initializeAppStateAsync() { } } +export function setBuiConfig(buiConfig: BuiConfig) { + const redactedConfig = { + ...buiConfig, + googleOauth: { + ...buiConfig.googleOauth, + clientSecret: '[REDACTED]', + }, + }; + + appState.value = { + ...appState.value, + buiConfig: redactedConfig, + }; +} + export function setPath(path: string) { appState.value = { ...appState.value, @@ -222,6 +243,7 @@ export function initializeAppState(config: WebSocketConfigApp): void { apiUrl: config.apiUrl, wsUrl: config.wsUrl, }); + if (appState.value.wsManager || appState.value.apiClient) { console.log('AppState already initialized'); return; @@ -367,5 +389,6 @@ export function cleanupAppState(): void { projectId, collaborationId, path: '/', + buiConfig: null, }; } diff --git a/bui/src/hooks/useChatState.ts b/bui/src/hooks/useChatState.ts index 0ba33179..6eb9ca6b 100755 --- a/bui/src/hooks/useChatState.ts +++ b/bui/src/hooks/useChatState.ts @@ -310,7 +310,7 @@ export function useChatState( // Update collaborations array with the loaded collaboration const updatedCollaborations = [...collaborations]; - if (collaboration) { + if (collaboration && 
!collaboration.error) { //{error: "Collaboration not found"} //console.log(`useChatState: url/projectId effect[${effectId}]: initialize-collaboration`, collaboration); const existingIndex = updatedCollaborations.findIndex((c) => c.id === collaboration.id); if (existingIndex >= 0) { @@ -1041,7 +1041,7 @@ export function useChatState( // modelConfig: collaboration.lastInteractionMetadata?.modelConfig, // interactionStats: collaboration.lastInteractionMetadata?.interactionStats, // llmProviderName: collaboration.lastInteractionMetadata?.llmProviderName || 'anthropic', - // model: collaboration.lastInteractionMetadata?.model || 'claude-sonnet-4-20250514', + // model: collaboration.lastInteractionMetadata?.model || 'claude-sonnet-4-5-20250929', // createdAt: collaboration.lastInteractionMetadata?.createdAt || new Date().toISOString(), // updatedAt: collaboration.lastInteractionMetadata?.updatedAt || new Date().toISOString(), // }; diff --git a/bui/src/hooks/useLogEntryFilterState.ts b/bui/src/hooks/useLogEntryFilterState.ts new file mode 100644 index 00000000..c8efd021 --- /dev/null +++ b/bui/src/hooks/useLogEntryFilterState.ts @@ -0,0 +1,92 @@ +import { signal } from '@preact/signals'; +import type { FilterPreset, LogEntryFilterState, LogEntryType } from '../types/logEntryFilter.types.ts'; +import { + applyPreset, + clearAllTypes, + loadCollaborationFilterState, + saveCollaborationFilterState, + selectAllTypes, + toggleFilterType, +} from '../utils/logEntryFilterState.utils.ts'; +import { DEFAULT_FILTER_STATE } from '../types/logEntryFilter.types.ts'; + +/** + * Global filter state signal + */ +const filterState = signal({ ...DEFAULT_FILTER_STATE }); + +/** + * Current collaboration ID for persistence + */ +let currentCollaborationId: string | null = null; + +/** + * Hook for managing log entry filter state + */ +export function useLogEntryFilterState() { + /** + * Initialize filter state for a collaboration + */ + const initializeFilterState = (collaborationId: string) => { + currentCollaborationId = collaborationId; + const state = loadCollaborationFilterState(collaborationId); + filterState.value = state; + }; + + /** + * Save current filter state + */ + const saveFilterState = () => { + if (!currentCollaborationId) return; + saveCollaborationFilterState(currentCollaborationId, filterState.value); + }; + + /** + * Set filter preset + */ + const setPreset = (preset: FilterPreset) => { + filterState.value = applyPreset(preset); + saveFilterState(); + }; + + /** + * Toggle a specific entry type + */ + const toggleType = (type: LogEntryType) => { + filterState.value = toggleFilterType(filterState.value, type); + saveFilterState(); + }; + + /** + * Select all entry types + */ + const selectAll = () => { + filterState.value = selectAllTypes(); + saveFilterState(); + }; + + /** + * Clear all entry types + */ + const clearAll = () => { + filterState.value = clearAllTypes(); + saveFilterState(); + }; + + /** + * Check if a type is currently selected + */ + const isTypeSelected = (type: LogEntryType): boolean => { + return filterState.value.customTypes.has(type); + }; + + return { + filterState, + initializeFilterState, + setPreset, + toggleType, + selectAll, + clearAll, + isTypeSelected, + }; +} diff --git a/bui/src/islands/AppSettings/DefaultProjectSettings.tsx b/bui/src/islands/AppSettings/DefaultProjectSettings.tsx index 2f217162..f360b5cc 100644 --- a/bui/src/islands/AppSettings/DefaultProjectSettings.tsx +++ b/bui/src/islands/AppSettings/DefaultProjectSettings.tsx @@ -169,8 +169,8 @@ export 
default function DefaultProjectSettings() { extendedThinkingBudget: 4000, activeTab: 'general', defaultModels: { - orchestrator: 'claude-sonnet-4-20250514', - agent: 'claude-sonnet-4-20250514', + orchestrator: 'claude-sonnet-4-5-20250929', + agent: 'claude-sonnet-4-5-20250929', chat: 'claude-3-5-haiku-20241022', }, }); @@ -194,8 +194,8 @@ export default function DefaultProjectSettings() { extendedThinkingBudget: config.api.extendedThinking?.budgetTokens ?? 4000, activeTab: 'general', defaultModels: { - orchestrator: config.defaultModels?.orchestrator || 'claude-sonnet-4-20250514', - agent: config.defaultModels?.agent || 'claude-sonnet-4-20250514', + orchestrator: config.defaultModels?.orchestrator || 'claude-sonnet-4-5-20250929', + agent: config.defaultModels?.agent || 'claude-sonnet-4-5-20250929', chat: config.defaultModels?.chat || 'claude-3-5-haiku-20241022', }, }); diff --git a/bui/src/islands/AppSettings/MCPServerItem.tsx b/bui/src/islands/AppSettings/MCPServerItem.tsx deleted file mode 100644 index e52a109e..00000000 --- a/bui/src/islands/AppSettings/MCPServerItem.tsx +++ /dev/null @@ -1,423 +0,0 @@ -import { MCPServerConfig } from 'shared/config/types.ts'; -import { useEffect, useState } from 'preact/hooks'; - -interface MCPServerItemProps { - server: MCPServerConfig; - isEditing: boolean; - toggleEdit: () => void; - onUpdate: (updatedServer: MCPServerConfig) => void; - onDelete: () => void; - onSave: () => void; -} - -const sensitiveEnvVarPatterns = [ - /token/i, - /key/i, - /secret/i, - /password/i, - /credential/i, -]; - -export default function MCPServerItem({ - server, - isEditing, - toggleEdit, - onUpdate, - onDelete, - onSave, -}: MCPServerItemProps) { - const [showSensitiveValues, setShowSensitiveValues] = useState(false); - const [newArgument, setNewArgument] = useState(''); - const [newEnvKey, setNewEnvKey] = useState(''); - const [newEnvValue, setNewEnvValue] = useState(''); - - const handleInputChange = (field: keyof MCPServerConfig, value: string | string[] | Record) => { - onUpdate({ - ...server, - [field]: value, - }); - }; - - const handleAddArgument = () => { - if (!newArgument.trim()) return; - - const newArgs = [...(server.args || []), newArgument.trim()]; - handleInputChange('args', newArgs); - setNewArgument(''); - }; - - const handleRemoveArgument = (index: number) => { - const newArgs = [...(server.args || [])]; - newArgs.splice(index, 1); - handleInputChange('args', newArgs); - }; - - const handleUpdateArgument = (index: number, value: string) => { - const newArgs = [...(server.args || [])]; - newArgs[index] = value; - handleInputChange('args', newArgs); - }; - - const handleAddEnvVar = () => { - if (!newEnvKey.trim()) return; - - const newEnv = { ...(server.env || {}) }; - newEnv[newEnvKey.trim()] = newEnvValue; - handleInputChange('env', newEnv); - - // Clear inputs for next entry - setNewEnvKey(''); - setNewEnvValue(''); - }; - - const handleRemoveEnvVar = (keyToRemove: string) => { - const newEnv = { ...(server.env || {}) }; - delete newEnv[keyToRemove]; - handleInputChange('env', newEnv); - }; - - const handleUpdateEnvValue = (key: string, newValue: string) => { - const newEnv = { ...(server.env || {}) }; - newEnv[key] = newValue; - handleInputChange('env', newEnv); - }; - - const hasAnyEnvVars = server.env && Object.keys(server.env).length > 0; - const hasSensitiveEnvVars = hasAnyEnvVars && - Object.keys(server.env || {}).some((key) => sensitiveEnvVarPatterns.some((pattern) => pattern.test(key))); - - if (isEditing) { - return ( -
[deleted MCPServerItem.tsx markup omitted — element tags were lost in extraction. The editing view rendered a read-only Server ID field, Name and Command inputs, a structured argument list with add/remove controls, an environment-variable editor that masks values whose keys match sensitiveEnvVarPatterns (with a show/hide toggle), helper text about arguments and environment variables, and Save/Cancel buttons. The read-only view showed the server name/ID, Edit and Delete buttons, the command, the argument list, and an environment-variable count noting whether sensitive values are present.]
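For reference, a minimal sketch (not verbatim from the deleted source) of the sensitive-value masking this component applied to environment variables — the pattern list is copied from the deleted file; the helper names and the exact masking rule are assumptions:

// Patterns copied from the deleted MCPServerItem.tsx source.
const sensitiveEnvVarPatterns = [/token/i, /key/i, /secret/i, /password/i, /credential/i];

// Hypothetical helper: does this env key look like it holds a secret?
function isSensitiveEnvKey(key: string): boolean {
	return sensitiveEnvVarPatterns.some((pattern) => pattern.test(key));
}

// Hypothetical helper: pick the input type for an env-value field,
// masking sensitive values unless the user toggled "show sensitive values".
function envInputType(key: string, showSensitiveValues: boolean): 'text' | 'password' {
	return !showSensitiveValues && isSensitiveEnvKey(key) ? 'password' : 'text';
}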
- ); -} diff --git a/bui/src/islands/AppSettings/MCPServersSection.tsx b/bui/src/islands/AppSettings/MCPServersSection.tsx index bfe67dac..4a4fc91b 100644 --- a/bui/src/islands/AppSettings/MCPServersSection.tsx +++ b/bui/src/islands/AppSettings/MCPServersSection.tsx @@ -1,57 +1,42 @@ import { signal } from '@preact/signals'; import { useEffect, useState } from 'preact/hooks'; import { useAppState } from '../../hooks/useAppState.ts'; -import { MCPServerConfig } from 'shared/config/types.ts'; -import MCPServerItem from './MCPServerItem.tsx'; +import type { MCPServerConfig } from 'shared/config/types.ts'; +import MCPServerFormDialog from '../../components/MCPServerFormDialog.tsx'; +import MCPServerListItem from '../../components/MCPServerListItem.tsx'; +import { useMCPOAuthManager } from '../../components/MCPOAuthManager.tsx'; import { activeTab } from '../AppSettings.tsx'; -// This component used to handle both global and project-level MCP server configs; now it's global only -// It could be simplified, or even lifted into AppSettings, but it's working and not too messy. +// This component handles global MCP server configuration with unified form dialog -interface MCPServersFormState { - globalServers: MCPServerConfig[]; - //projectId: string | null; - // Project-level server configuration removed -} - -interface MCPServersFormErrors { - globalServers?: string; - // Project-level server errors removed -} - -const formErrors = signal({}); const loading = signal(true); -const sensitiveEnvVarPatterns = [ - /token/i, - /key/i, - /secret/i, - /password/i, - /credential/i, -]; +// const sensitiveEnvVarPatterns = [ +// /token/i, +// /key/i, +// /secret/i, +// /password/i, +// /credential/i, +// ]; export default function MCPServersSection() { const appState = useAppState(); - const [formState, setFormState] = useState({ - globalServers: [], - }); + const { authorizeServer } = useMCPOAuthManager(); + + // Simplified state management + const [servers, setServers] = useState([]); const [hasExternalToolsAccess, setHasExternalToolsAccess] = useState(null); const [accessCheckLoading, setAccessCheckLoading] = useState(true); - const [isEditing, setIsEditing] = useState<{ global: Record }>({ - global: {}, - }); - const [showNewServerForm, setShowNewServerForm] = useState<{ global: boolean }>({ - global: false, - }); - // Additional state for structured inputs - const [newArgument, setNewArgument] = useState(''); - const [newEnvKey, setNewEnvKey] = useState(''); - const [newEnvValue, setNewEnvValue] = useState(''); - const [showSensitiveValues, setShowSensitiveValues] = useState(false); - const [newServer, setNewServer] = useState<{ global: MCPServerConfig }>({ - global: { id: '', name: '', command: '', args: [], env: {} }, - }); + // Dialog state + const [dialogState, setDialogState] = useState<{ + isOpen: boolean; + mode: 'new' | 'edit'; + server?: MCPServerConfig; + }>({ isOpen: false, mode: 'new' }); + + // Status messages + const [statusMessage, setStatusMessage] = useState<{ type: 'success' | 'error'; message: string } | null>(null); // Check external tools access useEffect(() => { @@ -70,143 +55,124 @@ export default function MCPServersSection() { checkAccess(); }, [appState.value.apiClient]); - // Load initial config + // Load servers from global config useEffect(() => { - const loadConfig = async () => { + const loadServers = async () => { try { const globalConfig = await appState.value.apiClient?.getGlobalConfig(); - - if (globalConfig) { - setFormState({ - globalServers: 
globalConfig.api.mcpServers || [], - }); - } + setServers(globalConfig?.api?.mcpServers || []); } catch (error) { console.error('Failed to load MCP server configs:', error); + setStatusMessage({ type: 'error', message: 'Failed to load server configurations' }); } finally { loading.value = false; } }; - loadConfig(); - }, [appState.value.apiClient, appState.value.projectId]); + loadServers(); + }, [appState.value.apiClient]); - const toggleEditMode = (serverId: string) => { - setIsEditing((prev) => ({ - ...prev, - global: { - ...prev.global, - [serverId]: !prev.global[serverId], - }, - })); - }; + // Clear status message after a delay + useEffect(() => { + if (statusMessage) { + const timer = setTimeout(() => setStatusMessage(null), 5000); + return () => clearTimeout(timer); + } + }, [statusMessage]); - const handleServerUpdate = (updatedServer: MCPServerConfig) => { - setFormState((prev) => ({ - ...prev, - globalServers: prev.globalServers.map((server) => server.id === updatedServer.id ? updatedServer : server), - })); + // Dialog handlers + const openNewServerDialog = () => { + setDialogState({ isOpen: true, mode: 'new' }); }; - const handleServerDelete = async (serverId: string) => { - try { - // Remove from global servers - const updatedServers = formState.globalServers.filter((server) => server.id !== serverId); - await appState.value.apiClient?.updateGlobalConfig('api.mcpServers', JSON.stringify(updatedServers)); - setFormState((prev) => ({ ...prev, globalServers: updatedServers })); - } catch (error) { - console.error(`Failed to delete MCP server ${serverId}:`, error); - } + const openEditServerDialog = (server: MCPServerConfig) => { + setDialogState({ isOpen: true, mode: 'edit', server }); }; - const handleNewServerChange = ( - field: keyof MCPServerConfig, - value: string | string[] | Record, - ) => { - setNewServer((prev) => ({ - ...prev, - global: { - ...prev.global, - [field]: value, - }, - })); + const closeDialog = () => { + setDialogState({ isOpen: false, mode: 'new' }); }; - const validateServer = (server: MCPServerConfig): string | undefined => { - if (!server.id) return 'Server ID is required'; - if (!server.command) return 'Command is required'; - - // Check for duplicate IDs - const existingGlobalIds = formState.globalServers.map((s) => s.id); - - if (existingGlobalIds.includes(server.id)) { - return `Server ID '${server.id}' already exists`; + // Server management handlers + // deno-lint-ignore require-await + const handleServerSave = async (server: MCPServerConfig) => { + try { + if (dialogState.mode === 'new') { + // Add new server to local state + setServers((prev) => [...prev, server]); + setStatusMessage({ type: 'success', message: `Server '${server.name}' added successfully` }); + } else { + // Update existing server in local state + setServers((prev) => prev.map((s) => s.id === server.id ? 
server : s)); + setStatusMessage({ type: 'success', message: `Server '${server.name}' updated successfully` }); + } + closeDialog(); + } catch (error) { + console.error('Failed to save server:', error); + setStatusMessage({ type: 'error', message: 'Failed to save server configuration' }); } - - return undefined; }; - const handleAddNewServer = async () => { - const serverToAdd = newServer.global; - const error = validateServer(serverToAdd); - - if (error) { - formErrors.value = { - ...formErrors.value, - globalServers: error, - }; - return; - } - + const handleServerDelete = async (serverId: string) => { try { - // Add to global servers - const updatedServers = [...formState.globalServers, serverToAdd]; - await appState.value.apiClient?.updateGlobalConfig('api.mcpServers', JSON.stringify(updatedServers)); - setFormState((prev) => ({ ...prev, globalServers: updatedServers })); - setNewServer((prev) => ({ - ...prev, - global: { id: '', name: '', command: '', args: [], env: {} }, - })); - - setShowNewServerForm((prev) => ({ - ...prev, - global: false, - })); - - formErrors.value = {}; + const result = await appState.value.apiClient?.removeMCPServer(serverId); + + if (result?.success) { + // Remove from local state + setServers((prev) => prev.filter((server) => server.id !== serverId)); + setStatusMessage({ type: 'success', message: result.message }); + } else { + setStatusMessage({ type: 'error', message: result?.message || 'Failed to delete server' }); + } } catch (error) { - console.error('Failed to add new MCP server:', error); + console.error('Failed to delete server:', error); + setStatusMessage({ type: 'error', message: 'Failed to delete server' }); } }; - const saveServerChanges = async (server: MCPServerConfig) => { + // OAuth authorization handler + const handleServerAuthorize = async (server: MCPServerConfig) => { try { - // Update global server - await appState.value.apiClient?.updateGlobalConfig( - 'api.mcpServers', - JSON.stringify(formState.globalServers), + setStatusMessage(null); // Clear any previous messages + + await authorizeServer( + server, + (updatedServer) => { + // Update server in local state with new OAuth tokens + setServers((prev) => prev.map((s) => s.id === updatedServer.id ? updatedServer : s)); + setStatusMessage({ + type: 'success', + message: `Successfully authorized connection to '${updatedServer.name}' `, + }); + }, + (error) => { + console.error('OAuth authorization failed:', error); + setStatusMessage({ + type: 'error', + message: `Authorization failed: ${error}`, + }); + }, ); - - // Turn off edit mode - toggleEditMode(server.id); } catch (error) { - console.error('Failed to save MCP server changes:', error); + console.error('Unexpected error during authorization:', error); + setStatusMessage({ + type: 'error', + message: 'Unexpected error during authorization', + }); } }; - // Get available servers - const getAvailableServers = (): MCPServerConfig[] => { - return formState.globalServers; - }; + // Get list of existing server IDs for validation + const getExistingServerIds = () => servers.map((s) => s.id); if (loading.value || accessCheckLoading) { return ( -
[loading-state markup omitted (old and new) — element tags were lost in extraction.]
@@ -218,12 +184,60 @@ export default function MCPServersSection() { const containerClasses = isAccessDenied ? 'opacity-50 pointer-events-none' : ''; return ( -
[added status-banner markup omitted — element tags were lost in extraction: it renders {statusMessage.message} with a success or error icon and what appears to be a dismiss control.]
+ )} + + {/* Access Denied Warning */} {isAccessDenied && ( -
[access-warning and section-header markup omitted — element tags were lost in extraction. The "External Tools Access Required" warning ("MCP (Model Context Protocol) servers require external tools access. Please upgrade your plan to configure and use MCP servers.") and the "MCP Servers Configuration" heading with its description ("Configure MCP servers available to all projects. Projects can select which servers to include.") carry over unchanged; the new markup re-nests them under a main-content wrapper and appears to add an "Add Server" control wired to openNewServerDialog (attributes lost).]
- {formState.globalServers.length > 0 + {/* Server List */} + {servers.length > 0 ? ( -
- {formState.globalServers.map((server) => ( - + {servers.map((server) => ( + toggleEditMode(server.id)} - onUpdate={(updatedServer) => handleServerUpdate(updatedServer)} + onEdit={() => openEditServerDialog(server)} onDelete={() => handleServerDelete(server.id)} - onSave={() => saveServerChanges(server)} + onAuthorize={() => handleServerAuthorize(server)} /> ))}
[server-list and form markup omitted — element tags were lost in extraction. Removed markup: the old "No global MCP servers configured" empty state; the old inline "Add New Server" form (Server ID, Name and Command inputs, argument and environment-variable editors with sensitive-value masking, validation error display, Add Server / Cancel buttons and the toggle that showed the form); and the old "Available MCP Servers" listing ("The following servers are available for use in projects. Each project can select which servers to include.", followed by each server's name, command, args and masked env values). Added markup: a new empty state ("No MCP servers configured" / "Get started by adding your first MCP server.") and a "Server Summary" block ("You have {servers.length} MCP server(s) configured. Each project can select which servers to include.").]
); } diff --git a/bui/src/islands/AuthContext.tsx b/bui/src/islands/AuthContext.tsx index 5fa9f10c..844b60f0 100644 --- a/bui/src/islands/AuthContext.tsx +++ b/bui/src/islands/AuthContext.tsx @@ -5,6 +5,7 @@ import { IS_BROWSER } from '$fresh/runtime.ts'; import { initializeAuthState, useAuthState } from '../hooks/useAuthState.ts'; import type { BuiConfig } from 'shared/config/types.ts'; import { AuthError, authError } from './auth/AuthError.tsx'; +import { setBuiConfig } from '../hooks/useAppState.ts'; interface AuthContextProps { children: ComponentChildren; @@ -61,6 +62,7 @@ export default function AuthContext({ children, buiConfig }: AuthContextProps) { // Initial auth check const initialCheck = async () => { initializeAuthState(buiConfig); + setBuiConfig(buiConfig); await checkSession(); isLoading.value = false; }; diff --git a/bui/src/islands/Chat.tsx b/bui/src/islands/Chat.tsx index 86d18e8c..a2b96d9c 100644 --- a/bui/src/islands/Chat.tsx +++ b/bui/src/islands/Chat.tsx @@ -27,12 +27,16 @@ import { ChatInput } from '../components/ChatInput.tsx'; import { CollaborationStateEmpty } from '../components/CollaborationStateEmpty.tsx'; //import { ToolBar } from '../components/ToolBar.tsx'; //import { ApiStatus } from 'shared/types.ts'; -import type { CollaborationLogDataEntry, CollaborationValues } from 'shared/types.ts'; +import type { CollaborationLogDataEntry, CollaborationLogEntry, CollaborationValues } from 'shared/types.ts'; import { generateInteractionId, shortenInteractionId } from 'shared/generateIds.ts'; import { getApiHostname, getApiPort, getApiUseTls } from '../utils/url.utils.ts'; import { getWorkingApiUrl } from '../utils/connectionManager.utils.ts'; import { LLMRolesModelConfig } from 'api/types.ts'; import { focusChatInputSync } from '../utils/focusManagement.utils.ts'; +import type { LogEntryFormatResponse } from '../utils/apiClient.utils.ts'; +import { logDataEntryHasLogEntry } from '../utils/typeGuards.utils.ts'; +import { useLogEntryFilterState } from '../hooks/useLogEntryFilterState.ts'; +import { shouldShowParentEntry } from '../utils/logEntryFilterState.utils.ts'; // Helper functions for URL parameters const getCollaborationId = () => { @@ -156,6 +160,15 @@ const chatConfig = signal({ ...defaultChatConfig }); const modelData = signal(null); const attachedFiles = signal([]); +// Cache for formatted log entries - key is unique entry identifier, value is formatted result +const formattedEntriesCache = signal>(new Map()); + +// Generate unique key for cache based on entry properties +function getEntryKey(logEntry: CollaborationLogEntry): string { + return logEntry ? `${logEntry.entryType || 'no_entry'}_${JSON.stringify(logEntry.content).slice(0, 50)}` : ''; + //return logEntry ? 
`${logDataEntry.timestamp}_${logEntry.entryType || 'no_entry'}_${JSON.stringify(logEntry.content).slice(0, 50)}` : ''; +} + export default function Chat({ chatState, }: ChatProps): JSX.Element { @@ -186,6 +199,19 @@ export default function Chat({ // Track input changes for performance monitoring //const lastInputUpdateRef = useRef(Date.now()); + // Get filter state + const { filterState } = useLogEntryFilterState(); + + // Track filter preset to force remount when it changes + const [filterPresetKey, setFilterPresetKey] = useState(filterState.value.preset); + + // Update key when filter preset changes to force MessageEntry remount + useEffect(() => { + setFilterPresetKey(filterState.value.preset); + // Enable auto-scroll when filter changes so user sees new content + setShouldAutoScroll(true); + }, [filterState.value.preset]); + const setInputWithTracking = (value: string) => { //const now = Date.now(); //const timeSinceLastUpdate = now - lastInputUpdateRef.current; @@ -377,10 +403,40 @@ export default function Chat({ // Utility functions - const handleCopy = async (text: string) => { + const handleCopy = async (text: string, html?: string, toastMessage?: string) => { + if (!text && !html) { + setToastMessage(toastMessage || 'Failed to copy content'); + setShowToast(true); + } try { - await navigator.clipboard.writeText(text); - setToastMessage('Content copied to clipboard!'); + // Copy using the modern API or fallback + if (navigator.clipboard && window.isSecureContext) { + //await navigator.clipboard.writeText(text); + + const typePlain = 'text/plain'; + const typeHtml = 'text/html'; + const clipboardItems: ClipboardItem[] = []; + if (html) { + clipboardItems.push( + new ClipboardItem({ + [typeHtml]: html, + }), + ); + } + if (text) { + clipboardItems.push( + new ClipboardItem({ + [typePlain]: text, + }), + ); + } + + await navigator.clipboard.write(clipboardItems); + } else { + // Fallback for older browsers + document.execCommand('copy'); + } + setToastMessage(toastMessage || 'Content copied to clipboard!'); setShowToast(true); } catch (err) { console.error('ChatIsland: Failed to copy:', err); @@ -430,6 +486,23 @@ export default function Chat({ return logDataEntry; }; + const onFormattedLogEntry = ( + logEntry: CollaborationLogEntry, + formattedLogEntry: LogEntryFormatResponse['formattedResult'], + ) => { + const entryKey = getEntryKey(logEntry); + const hasCachedEntry = formattedEntriesCache.value.has(entryKey); + if (!hasCachedEntry) formattedEntriesCache.value.set(entryKey, formattedLogEntry); + }; + + const getFormattedLogEntry = ( + logDataEntry: CollaborationLogDataEntry, + ): LogEntryFormatResponse['formattedResult'] | null => { + if (!logDataEntry.logEntry) return null; + const entryKey = getEntryKey(logDataEntry.logEntry); + return formattedEntriesCache.value.get(entryKey) || null; + }; + const deleteCollaboration = async (id: string) => { try { if (!projectId) throw new Error('projectId is undefined for delete collaboration'); @@ -745,7 +818,7 @@ export default function Chat({ } return () => messagesContainer.removeEventListener('scroll', handleScroll); - }, [chatState.value.logDataEntries, shouldAutoScroll]); + }, [chatState.value.logDataEntries, shouldAutoScroll, filterState.value.preset]); // Maintain scroll position when input height changes useEffect(() => { @@ -963,6 +1036,9 @@ export default function Chat({ disabled={!chatState.value.status.isReady || isProcessing(chatState.value.status)} projectId={projectId} + onCopy={handleCopy} + 
getFormattedLogEntry={getFormattedLogEntry} + projectConfig={projectState.value.projectConfig} /> {/* Messages */} @@ -1033,17 +1109,24 @@ export default function Chat({
)} {chatState.value.logDataEntries.length > 0 && - chatState.value.logDataEntries.map((logDataEntry, index) => ( - - ))} + // FUTURE: Add fade-in animation here for smooth entry appearance + // Consider: transition-opacity duration-200 + chatState.value.logDataEntries + .filter((entry) => shouldShowParentEntry(entry, filterState.value)) + .map((logDataEntry, index) => ( + + ))}
diff --git a/bui/src/islands/Context/appConfig.tsx b/bui/src/islands/Context/appConfig.tsx index 2916ad47..46bc1c72 100644 --- a/bui/src/islands/Context/appConfig.tsx +++ b/bui/src/islands/Context/appConfig.tsx @@ -3,6 +3,7 @@ import type { ComponentChildren } from 'preact'; import { IS_BROWSER } from '$fresh/runtime.ts'; import { initializeAuthState, useAuthState } from '../../hooks/useAuthState.ts'; import type { BuiConfig } from 'shared/config/types.ts'; +import { setBuiConfig } from '../../hooks/useAppState.ts'; interface AppConfigContextProps { children: ComponentChildren; @@ -19,6 +20,7 @@ export default function AppConfigContext({ children, buiConfig }: AppConfigConte // Initialize app state const initializeAppConfig = () => { initializeAuthState(buiConfig); + setBuiConfig(buiConfig); }; initializeAppConfig(); diff --git a/bui/src/plugins/supabaseAuth.ts b/bui/src/plugins/supabaseAuth.ts index 21020277..d969eb23 100644 --- a/bui/src/plugins/supabaseAuth.ts +++ b/bui/src/plugins/supabaseAuth.ts @@ -3,7 +3,7 @@ import type { FreshContext, Plugin } from '$fresh/server.ts'; //import { type AuthState } from '../types/auth.ts'; import { initializeAuthState, useAuthState } from '../hooks/useAuthStateSupabase.ts'; import { type BuiConfig } from 'shared/config/types.ts'; -//import { FreshAppState } from 'bui/types/state.ts'; // Augment Fresh context state types +import { FreshAppState } from 'bui/types/state.ts'; export const supabaseAuthPlugin = (buiConfig: BuiConfig): Plugin => { //console.log('supabaseAuthPlugin: Auth system config:', buiConfig); @@ -37,7 +37,7 @@ export const supabaseAuthPlugin = (buiConfig: BuiConfig): Plugin => { }; }; -async function setSessionState(req: Request, ctx: FreshContext) { +async function setSessionState(req: Request, ctx: FreshContext) { if (ctx.destination !== 'route') return await ctx.next(); const { authState, getServerClient } = useAuthState(); diff --git a/bui/src/routes/_middleware.ts b/bui/src/routes/_middleware.ts index 139323e6..9d9153b8 100644 --- a/bui/src/routes/_middleware.ts +++ b/bui/src/routes/_middleware.ts @@ -2,6 +2,21 @@ import { FreshContext, MiddlewareHandlerContext } from '$fresh/server.ts'; import { FreshAppState } from 'bui/types/state.ts'; // Augment Fresh context state types export async function handler(req: Request, ctx: MiddlewareHandlerContext) { + if (req.method == 'OPTIONS') { + const resp = new Response(null, { + status: 204, + }); + const origin = req.headers.get('Origin') || '*'; + const headers = resp.headers; + headers.set('Access-Control-Allow-Origin', origin); + headers.set('Access-Control-Allow-Methods', 'POST, OPTIONS, GET, PUT, DELETE'); + headers.set( + 'Access-Control-Allow-Headers', + 'Content-Type, Content-Length, Accept-Encoding, X-CSRF-Token, Authorization, accept, origin, Cache-Control, X-Requested-With', + ); + headers.set('Access-Control-Allow-Credentials', 'true'); + return resp; + } const origin = req.headers.get('Origin') || '*'; const resp = await ctx.next(); const headers = resp.headers; diff --git a/bui/src/routes/api/v1/oauth/google/config.ts b/bui/src/routes/api/v1/oauth/google/config.ts deleted file mode 100644 index ca89d8ba..00000000 --- a/bui/src/routes/api/v1/oauth/google/config.ts +++ /dev/null @@ -1,72 +0,0 @@ -import { type FreshContext, Handlers } from '$fresh/server.ts'; -import type { FreshAppState } from 'bui/types/state.ts'; - -/** - * Get Google OAuth configuration for the BUI - */ -export const handler: Handlers = { - GET(_req, ctx: FreshContext) { - try { - // Get OAuth 
configuration from state (set by stateConfig plugin) - const clientId = ctx.state.buiConfig.googleOauth.clientId; - const redirectUri = ctx.state.buiConfig.googleOauth.redirectUri; - //console.log(`OAuth: handling token: `, { clientId, clientSecret, redirectUri }); - - if (!clientId || !redirectUri) { - console.error('OAuth: Missing Google OAuth configuration'); - return new Response( - JSON.stringify({ - error: { - code: 'MISSING_CONFIG', - message: 'Google OAuth configuration incomplete', - reason: 'missing_oauth_config', - }, - }), - { - status: 500, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - - // Define required scopes for Google Docs integration - const scopes = [ - 'https://www.googleapis.com/auth/documents', // read and write access to docs - //'https://www.googleapis.com/auth/documents.readonly', // read access to docs - 'https://www.googleapis.com/auth/drive', // read and write access to files - //'https://www.googleapis.com/auth/drive.readonly', // read access to files - //'https://www.googleapis.com/auth/drive.file', // read and write access to files created by BB - ]; - - // Return config for PKCE OAuth flow (no client secret needed) - // clientId is application-level config, not stored with user credentials - - return new Response( - JSON.stringify({ - clientId, - //clientSecret, - redirectUri, - scopes, - }), - { - status: 200, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } catch (error) { - console.error('OAuth: Google config error:', error); - return new Response( - JSON.stringify({ - error: { - code: 'SERVER_ERROR', - message: 'Internal server error', - }, - }), - { - status: 500, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - }, -}; diff --git a/bui/src/routes/api/v1/oauth/google/token.ts b/bui/src/routes/api/v1/oauth/google/token.ts deleted file mode 100644 index e6fc4fc6..00000000 --- a/bui/src/routes/api/v1/oauth/google/token.ts +++ /dev/null @@ -1,336 +0,0 @@ -import { type FreshContext, Handlers } from '$fresh/server.ts'; -import type { FreshAppState } from 'bui/types/state.ts'; - -/** - * Handle Google OAuth token operations: - * 1. Exchange authorization code for tokens using PKCE (Proof Key for Code Exchange - RFC 7636) - * 2. Refresh access tokens using refresh tokens - * - * PKCE eliminates the need for client secrets in public clients like desktop apps. - * Instead of storing a shared secret, we use cryptographic proof that the same - * client that initiated the auth flow is completing the token exchange. - * - * For token refresh, the client secret is securely handled server-side. 
- */ -export const handler: Handlers = { - async POST(req, ctx: FreshContext) { - const GOOGLE_TOKEN_URL = 'https://oauth2.googleapis.com/token'; - - try { - const body = await req.json(); - const { code, codeVerifier, state: _state, refreshToken, operation } = body; - //console.log(`OAuth: handling token for ${operation || 'exchange'}`); - - // Determine operation type - refresh or token exchange - const isRefreshOperation = operation === 'refresh'; - - // Validate parameters based on operation type - if (isRefreshOperation) { - if (!refreshToken) { - return new Response( - JSON.stringify({ - error: { - code: 'INVALID_REQUEST', - message: 'Refresh token is required for refresh operation', - reason: 'missing_refresh_token', - }, - }), - { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - } else { - // Validate required PKCE parameters for token exchange - if (!code) { - return new Response( - JSON.stringify({ - error: { - code: 'INVALID_REQUEST', - message: 'Authorization code is required', - reason: 'missing_code', - }, - }), - { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - - if (!codeVerifier) { - return new Response( - JSON.stringify({ - error: { - code: 'INVALID_REQUEST', - message: 'PKCE code verifier is required', - reason: 'missing_code_verifier', - }, - }), - { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - } - - // Get OAuth configuration from state (set by stateConfig plugin) - const clientId = ctx.state.buiConfig.googleOauth.clientId; - const clientSecret = ctx.state.buiConfig.googleOauth.clientSecret; - const redirectUri = ctx.state.buiConfig.googleOauth.redirectUri; - //console.log(`OAuth: handling token: `, { clientId, clientSecret, redirectUri }); - - if (!clientId || !clientSecret || !redirectUri) { - console.error('OAuth: Missing Google OAuth configuration'); - return new Response( - JSON.stringify({ - error: { - code: 'MISSING_CONFIG', - message: 'Google OAuth configuration incomplete', - reason: 'missing_oauth_config', - }, - }), - { - status: 500, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - - let tokenResponse: Response; - let operationLog: string; - - if (isRefreshOperation) { - // Handle token refresh - operationLog = 'refreshing access token'; - console.log('OAuth: Refreshing access token'); - console.log('OAuth: Using Client ID:', clientId); - console.log('OAuth: Request body:', { - refresh_token: refreshToken ? 'present' : 'missing', - redirectUri, - }); - - tokenResponse = await fetch(GOOGLE_TOKEN_URL, { - method: 'POST', - headers: { - 'Content-Type': 'application/x-www-form-urlencoded', - }, - body: new URLSearchParams({ - grant_type: 'refresh_token', - refresh_token: refreshToken, - client_id: clientId, - client_secret: clientSecret, - }), - }); - } else { - // Handle authorization code exchange using PKCE - operationLog = 'exchanging authorization code for tokens using PKCE'; - console.log('OAuth: Exchanging authorization code for tokens using PKCE'); - console.log('OAuth: Using Client ID:', clientId); - console.log('OAuth: Request body:', { - code: code ? 'present' : 'missing', - codeVerifier: codeVerifier ? 
'present' : 'missing', - redirectUri, - }); - - // // Log the exact parameters being sent to Google - // const tokenParams = { - // code, - // client_id: clientId, - // client_secret: clientSecret, // Use the actual secret Google provided - // code_verifier: codeVerifier, - // redirect_uri: redirectUri, - // grant_type: 'authorization_code', - // }; - // console.log('OAuth: Exact token request params:', tokenParams); - - tokenResponse = await fetch(GOOGLE_TOKEN_URL, { - method: 'POST', - headers: { - 'Content-Type': 'application/x-www-form-urlencoded', - }, - body: new URLSearchParams({ - code, - client_id: clientId, - client_secret: clientSecret, // Empty string for Desktop PKCE - // PKCE: Use code_verifier instead of client_secret - code_verifier: codeVerifier, - redirect_uri: redirectUri, - grant_type: 'authorization_code', - // Empty client_secret + PKCE for Desktop apps - }), - }); - } - - if (!tokenResponse.ok) { - const errorText = await tokenResponse.text(); - console.error(`OAuth: ${operationLog} failed (${tokenResponse.status}): ${errorText}`); - - return new Response( - JSON.stringify({ - error: { - code: isRefreshOperation ? 'TOKEN_REFRESH_FAILED' : 'TOKEN_EXCHANGE_FAILED', - message: isRefreshOperation - ? 'Failed to refresh access token' - : 'Failed to exchange authorization code for tokens using PKCE', - reason: isRefreshOperation ? 'invalid_refresh_token' : 'invalid_code_or_verifier', - }, - }), - { - status: 400, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - - const tokenData = await tokenResponse.json(); - - // Calculate expiration timestamp - const expiresAt = tokenData.expires_in ? Date.now() + (tokenData.expires_in * 1000) : undefined; - - console.log(`OAuth: Successfully ${operationLog}`); - - // Return token data in format expected by AuthConfig - // Note: clientId is not included - it comes from app config, not user credentials - return new Response( - JSON.stringify({ - accessToken: tokenData.access_token, - refreshToken: tokenData.refresh_token, - expiresIn: tokenData.expires_in, - expiresAt, - tokenType: tokenData.token_type || 'Bearer', - scope: tokenData.scope, - }), - { - status: 200, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } catch (error) { - console.error('OAuth: Token operation error:', error); - return new Response( - JSON.stringify({ - error: { - code: 'SERVER_ERROR', - message: 'Internal server error during OAuth token operation', - }, - }), - { - status: 500, - headers: { 'Content-Type': 'application/json' }, - }, - ); - } - }, - // // GET request is handled by /oauth/google/callback - /* - async GET(req, _ctx) { - // Handle OAuth callback from Google (authorization code in query params) - try { - const url = new URL(req.url); - const code = url.searchParams.get('code'); - const state = url.searchParams.get('state'); - const error = url.searchParams.get('error'); - - // Handle OAuth errors - if (error) { - console.error('OAuth: Authorization error:', error); - const errorDescription = url.searchParams.get('error_description') || 'Unknown error'; - - // Return HTML page that posts error message to parent window - return new Response(` - - - OAuth Error - - - - - `, { - status: 200, - headers: { 'Content-Type': 'text/html' }, - }); - } - - // Validate required parameters - if (!code || !state) { - console.error('OAuth: Missing required parameters in callback'); - - // Return HTML page that posts error message to parent window - return new Response(` - - - OAuth Error - - - - - `, { - status: 200, - headers: { 
'Content-Type': 'text/html' }, - }); - } - - console.log('OAuth: Received authorization callback with code and state'); - - // Return HTML page that posts success message to parent window - return new Response(` - - - OAuth Success - - - - - `, { - status: 200, - headers: { 'Content-Type': 'text/html' }, - }); - - } catch (error) { - console.error('OAuth: Callback handling error:', error); - - // Return HTML page that posts error message to parent window - return new Response(` - - - OAuth Error - - - - - `, { - status: 200, - headers: { 'Content-Type': 'text/html' }, - }); - } - }, - */ -}; diff --git a/bui/src/routes/oauth/google/callback.tsx b/bui/src/routes/oauth/google/callback.tsx deleted file mode 100644 index 85a69189..00000000 --- a/bui/src/routes/oauth/google/callback.tsx +++ /dev/null @@ -1,171 +0,0 @@ -import { type PageProps } from '$fresh/server.ts'; - -export default function GoogleOAuthCallback({ url }: PageProps) { - // Extract query parameters - const code = url.searchParams.get('code'); - const state = url.searchParams.get('state'); - const error = url.searchParams.get('error'); - const errorDescription = url.searchParams.get('error_description'); - - return ( - - - Google OAuth Callback - - - - - -
[deleted GoogleOAuthCallback page markup omitted — element tags were lost in extraction: a centered spinner with the text "Processing authentication..."; any inline script/styles on the page were also lost.]
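As background for the deleted token.ts handler above, here is a minimal, hedged sketch of how a client typically produces the PKCE code_verifier / code_challenge pair that handler expects (S256 method assumed; this is not taken from the BB source):

// Minimal PKCE helper sketch (RFC 7636). Assumes a Web Crypto environment (Deno/browser).
function base64UrlEncode(bytes: Uint8Array): string {
	return btoa(String.fromCharCode(...bytes))
		.replace(/\+/g, '-')
		.replace(/\//g, '_')
		.replace(/=+$/, '');
}

export async function createPkcePair(): Promise<{ codeVerifier: string; codeChallenge: string }> {
	// 32 random bytes -> 43-character verifier, within the 43-128 range RFC 7636 requires.
	const codeVerifier = base64UrlEncode(crypto.getRandomValues(new Uint8Array(32)));
	const digest = await crypto.subtle.digest('SHA-256', new TextEncoder().encode(codeVerifier));
	const codeChallenge = base64UrlEncode(new Uint8Array(digest));
	return { codeVerifier, codeChallenge };
}

// The challenge (with code_challenge_method=S256) goes on the authorization URL;
// the verifier is sent later with the authorization code, as in the deleted token.ts handler.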