diff --git a/aiprompts/aimodesconfig.md b/aiprompts/aimodesconfig.md new file mode 100644 index 0000000000..207b6fad88 --- /dev/null +++ b/aiprompts/aimodesconfig.md @@ -0,0 +1,709 @@ +# Wave AI Modes Configuration - Visual Editor Architecture + +## Overview + +Wave Terminal's AI modes configuration system allows users to define custom AI assistants with different providers, models, and capabilities. The configuration is stored in `~/.waveterm/config/waveai.json` and provides a flexible way to configure multiple AI modes that appear in the Wave AI panel. + +**Key Design Decisions:** +- Visual editor works on **valid JSON only** - if JSON is invalid, fall back to JSON editor +- Default modes (`waveai@quick`, `waveai@balanced`, `waveai@deep`) are **read-only** in visual editor +- Edits modify the **in-memory JSON directly** - changes saved via existing save button +- Mode keys are **auto-generated** from provider + model or random ID (last 4-6 chars) +- Secrets use **fixed naming convention** per provider (e.g., `OPENAI_KEY`, `OPENROUTER_KEY`) +- Quick **inline secret editor** instead of complex secret management + +## Current System Architecture + +### Data Structure + +**Location:** `pkg/wconfig/settingsconfig.go:264-284` + +```go +type AIModeConfigType struct { + // Display Configuration + DisplayName string `json:"display:name"` // Required + DisplayOrder float64 `json:"display:order,omitempty"` + DisplayIcon string `json:"display:icon,omitempty"` + DisplayShortDesc string `json:"display:shortdesc,omitempty"` + DisplayDescription string `json:"display:description,omitempty"` + + // Provider & Model + Provider string `json:"ai:provider,omitempty"` // wave, google, openrouter, openai, azure, azure-legacy, custom + APIType string `json:"ai:apitype"` // Required: anthropic-messages, openai-responses, openai-chat + Model string `json:"ai:model"` // Required + + // AI Behavior + ThinkingLevel string `json:"ai:thinkinglevel,omitempty"` // low, medium, high + Capabilities []string `json:"ai:capabilities,omitempty"` // pdfs, images, tools + + // Connection Details + Endpoint string `json:"ai:endpoint,omitempty"` + APIVersion string `json:"ai:apiversion,omitempty"` + APIToken string `json:"ai:apitoken,omitempty"` + APITokenSecretName string `json:"ai:apitokensecretname,omitempty"` + + // Azure-Specific + AzureResourceName string `json:"ai:azureresourcename,omitempty"` + AzureDeployment string `json:"ai:azuredeployment,omitempty"` + + // Wave AI Specific + WaveAICloud bool `json:"waveai:cloud,omitempty"` + WaveAIPremium bool `json:"waveai:premium,omitempty"` +} +``` + +**Storage:** `FullConfigType.WaveAIModes` - `map[string]AIModeConfigType` + +Keys follow pattern: `provider@modename` (e.g., `waveai@quick`, `openai@gpt4`) + +### Provider Types & Defaults + +**Defined in:** `pkg/aiusechat/uctypes/uctypes.go:27-35` + +1. **wave** - Wave AI Cloud service + - Auto-sets: `waveai:cloud = true`, endpoint from env or default + - Default endpoint: `https://cfapi.waveterm.dev/api/waveai` + - Used for Wave's hosted AI modes + +2. **openai** - OpenAI API + - Auto-sets: endpoint `https://api.openai.com/v1` + - Auto-detects API type based on model: + - Legacy models (gpt-4o, gpt-3.5): `openai-chat` + - New models (gpt-5*, gpt-4.1*, o1*, o3*): `openai-responses` + +3. **openrouter** - OpenRouter service + - Auto-sets: endpoint `https://openrouter.ai/api/v1`, API type `openai-chat` + +4. **google** - Google AI (Gemini, etc.) + - No auto-defaults currently + +5. 
**azure** - Azure OpenAI (new unified API) + - Auto-sets: API version `v1`, endpoint from resource name + - Endpoint pattern: `https://{resource}.openai.azure.com/openai/v1/{responses|chat/completions}` + - Auto-detects API type based on model + +6. **azure-legacy** - Azure OpenAI (legacy chat completions) + - Auto-sets: API version `2025-04-01-preview`, API type `openai-chat` + - Endpoint pattern: `https://{resource}.openai.azure.com/openai/deployments/{deployment}/chat/completions?api-version={version}` + - Requires `AzureResourceName` and `AzureDeployment` + +7. **custom** - Custom provider + - No auto-defaults + - User must specify all fields manually + +### Default Configuration + +**Location:** `pkg/wconfig/defaultconfig/waveai.json` + +Ships with three Wave AI modes: +- `waveai@quick` - Fast responses (gpt-5-mini, low thinking) +- `waveai@balanced` - Balanced (gpt-5.1, low thinking) [premium] +- `waveai@deep` - Maximum capability (gpt-5.1, medium thinking) [premium] + +### Current UI State + +**Location:** `frontend/app/view/waveconfig/waveaivisual.tsx` + +Currently shows placeholder: "Visual editor coming soon..." + +The component receives: +- `model: WaveConfigViewModel` - Access to config file operations +- Existing patterns from `SecretsContent` for list/detail views + +## Visual Editor Design Plan + +### High-Level Architecture + +``` +┌─────────────────────────────────────────────────────────┐ +│ Wave AI Modes Configuration │ +│ ┌───────────────┐ ┌──────────────────────────────┐ │ +│ │ │ │ │ │ +│ │ Mode List │ │ Mode Editor/Viewer │ │ +│ │ │ │ │ │ +│ │ [Quick] │ │ Provider: [wave ▼] │ │ +│ │ [Balanced] │ │ │ │ +│ │ [Deep] │ │ Display Configuration │ │ +│ │ [Custom] │ │ ├─ Name: ... │ │ +│ │ │ │ ├─ Icon: ... │ │ +│ │ [+ Add New] │ │ └─ Description: ... │ │ +│ │ │ │ │ │ +│ │ │ │ Provider Configuration │ │ +│ │ │ │ (Provider-specific fields) │ │ +│ │ │ │ │ │ +│ │ │ │ [Save] [Delete] [Cancel] │ │ +│ └───────────────┘ └──────────────────────────────┘ │ +└─────────────────────────────────────────────────────────┘ +``` + +### Component Structure + +```typescript +WaveAIVisualContent +├─ ModeList (left panel) +│ ├─ Header with "Add New Mode" button +│ ├─ List of existing modes (sorted by display:order) +│ │ └─ ModeListItem (icon, name, short desc, provider badge) +│ └─ Empty state if no modes +│ +└─ ModeEditor (right panel) + ├─ Provider selector dropdown (when creating/editing) + ├─ Display section (common to all providers) + │ ├─ Name input (required) + │ ├─ Icon picker (optional) + │ ├─ Display order (optional, number) + │ ├─ Short description (optional) + │ └─ Description textarea (optional) + │ + ├─ Provider Configuration section (dynamic based on provider) + │ └─ [Provider-specific form fields] + │ + └─ Action buttons (Save, Delete, Cancel) +``` + +### Provider-Specific Form Fields + +#### 1. Wave Provider (`wave`) +**Read-only/Auto-managed:** +- Endpoint (shows default or env override) +- Cloud flag (always true) +- Secret: Not applicable (managed by Wave) + +**User-configurable:** +- Model (required, text input with suggestions: gpt-5-mini, gpt-5.1) +- API Type (required, dropdown: openai-responses, openai-chat) +- Thinking Level (optional, dropdown: low, medium, high) +- Capabilities (optional, checkboxes: tools, images, pdfs) +- Premium flag (checkbox) + +#### 2. 
OpenAI Provider (`openai`) +**Auto-managed:** +- Endpoint (shows: api.openai.com/v1) +- API Type (auto-detected from model, editable) +- Secret Name: Fixed as `OPENAI_KEY` + +**User-configurable:** +- Model (required, text input with suggestions: gpt-4o, gpt-5-mini, gpt-5.1, o1-preview) +- API Key (via secret modal - see Secret Management below) +- Thinking Level (optional) +- Capabilities (optional) + +#### 3. OpenRouter Provider (`openrouter`) +**Auto-managed:** +- Endpoint (shows: openrouter.ai/api/v1) +- API Type (always openai-chat) +- Secret Name: Fixed as `OPENROUTER_KEY` + +**User-configurable:** +- Model (required, text input - OpenRouter model format) +- API Key (via secret modal) +- Thinking Level (optional) +- Capabilities (optional) + +#### 4. Azure Provider (`azure`) +**Auto-managed:** +- API Version (always v1) +- Endpoint (computed from resource name) +- API Type (auto-detected from model) +- Secret Name: Fixed as `AZURE_KEY` + +**User-configurable:** +- Azure Resource Name (required, validated format) +- Model (required) +- API Key (via secret modal) +- Thinking Level (optional) +- Capabilities (optional) + +#### 5. Azure Legacy Provider (`azure-legacy`) +**Auto-managed:** +- API Version (default: 2025-04-01-preview, editable) +- API Type (always openai-chat) +- Endpoint (computed from resource + deployment + version) +- Secret Name: Fixed as `AZURE_KEY` + +**User-configurable:** +- Azure Resource Name (required, validated) +- Azure Deployment (required) +- Model (required) +- API Key (via secret modal) +- Thinking Level (optional) +- Capabilities (optional) + +#### 6. Google Provider (`google`) +**Auto-managed:** +- Secret Name: Fixed as `GOOGLE_KEY` + +**User-configurable:** +- Model (required) +- API Type (required dropdown) +- Endpoint (required) +- API Key (via secret modal) +- API Version (optional) +- Thinking Level (optional) +- Capabilities (optional) + +#### 7. Custom Provider (`custom`) +**User must specify everything:** +- Model (required) +- API Type (required dropdown) +- Endpoint (required) +- Secret Name (required text input - user defines their own secret name) +- API Key (via secret modal using custom secret name) +- API Version (optional) +- Thinking Level (optional) +- Capabilities (optional) +- Azure Resource Name (optional) +- Azure Deployment (optional) + +### Data Flow + +``` +Load JSON → Parse → Render Visual Editor + ↓ + User Edits Mode → Update fileContentAtom (JSON string) + ↓ + Click Save → Existing save logic validates & writes +``` + +**Simplified Operations:** +1. **Load:** Parse `fileContentAtom` JSON string into mode objects for display +2. **Edit Mode:** Update parsed object → stringify → set `fileContentAtom` → marks as edited +3. **Add Mode:** + - Generate unique key from provider/model or random ID + - Add new mode to parsed object → stringify → set `fileContentAtom` +4. **Delete Mode:** Remove key from parsed object → stringify → set `fileContentAtom` +5. 
**Save:** Existing `model.saveFile()` handles validation and write + +**Mode Key Generation:** +```typescript +function generateModeKey(provider: string, model: string): string { + // Try semantic key first: provider@model-sanitized + const sanitized = model.toLowerCase() + .replace(/[^a-z0-9]/g, '-') + .replace(/-+/g, '-') + .replace(/^-|-$/g, ''); + const semanticKey = `${provider}@${sanitized}`; + + // Check for collision, if exists append random suffix + if (existingModes[semanticKey]) { + const randomId = crypto.randomUUID().slice(-6); + return `${provider}@${sanitized}-${randomId}`; + } + return semanticKey; +} +// Examples: openai@gpt-4o, openrouter@claude-3-5-sonnet, azure@custom-fb4a2c +``` + +**Secret Naming Convention:** +```typescript +// Fixed secret names per provider (except custom) +const SECRET_NAMES = { + openai: "OPENAI_KEY", + openrouter: "OPENROUTER_KEY", + azure: "AZURE_KEY", + "azure-legacy": "AZURE_KEY", + google: "GOOGLE_KEY", + // custom provider: user specifies their own secret name +} as const; + +function getSecretName(provider: string, customSecretName?: string): string { + if (provider === "custom") { + return customSecretName || "CUSTOM_API_KEY"; + } + return SECRET_NAMES[provider]; +} +``` + +### Secret Management UI + +**Secret Status Indicator:** +Display next to API Key field for providers that need one: +- ✅ Green check icon: Secret exists and is set +- ⚠️ Warning icon (yellow/orange): Secret not set or empty +- Click icon to open secret modal + +**Secret Modal:** +``` +┌─────────────────────────────────────┐ +│ Set API Key for OpenAI │ +│ │ +│ Secret Name: OPENAI_KEY │ +│ [read-only for non-custom] │ +│ │ +│ API Key: │ +│ [********************] [Show/Hide]│ +│ │ +│ [Cancel] [Save] │ +└─────────────────────────────────────┘ +``` + +**Modal Behavior:** +1. **Open Modal:** Click status icon or "Set API Key" button +2. **Show Secret Name:** + - Non-custom providers: Read-only, shows fixed name + - Custom provider: Editable text input (user specifies) +3. **API Key Input:** + - Masked password field + - Show/Hide toggle button + - Load existing value if secret already exists +4. **Save:** + - Validates not empty + - Calls RPC to set secret + - Updates status icon +5. **Cancel:** Close without changes + +**Integration with Mode Editor:** +- Check secret existence on mode load/select +- Update icon based on RPC `GetSecretsCommand` result +- "Save" button for mode only saves JSON config +- Secret is set immediately via modal (separate from JSON save) + +### Key Features + +#### 1. Mode List +- Display modes sorted by `display:order` (ascending) +- Show icon, name, short description +- Badge showing provider type +- Highlight Wave AI premium modes +- Click to edit + +#### 2. Add New Mode Flow +1. Click "Add New Mode" +2. Enter mode key (validated: alphanumeric, @, -, ., _) +3. Select provider from dropdown +4. Form dynamically updates to show provider-specific fields +5. Fill required fields (marked with *) +6. Save → validates → adds to config → refreshes list + +#### 3. Edit Mode Flow +1. Click mode from list +2. Load mode data into form +3. Provider is fixed (show read-only or with warning about changing) +4. Edit fields +5. 
Save → validates → updates config → refreshes list + +**Raw JSON Editor Option:** +- "Edit Raw JSON" button in mode editor (available for all modes) +- Opens modal with Monaco editor showing just this mode's JSON +- Validates JSON structure before allowing save +- Useful for: + - Modes without a provider field (edge cases) + - Advanced users who want precise control + - Copying/modifying complex configurations +- Validation checks: + - Valid JSON syntax + - Required fields present (`display:name`, `ai:apitype`, `ai:model`) + - Enum values valid + - Custom error messages for each validation failure + +#### 4. Delete Mode Flow +1. Click mode from list +2. Delete button in editor +3. Confirm dialog +4. Remove from config → save → refresh list + +#### 5. Secret Integration +- For API Token fields, provide two options: + - Direct input (text field, masked) + - Secret reference (dropdown of existing secrets + link to secrets page) +- When secret is selected, store name in `ai:apitokensecretname` +- When direct token, store in `ai:apitoken` + +#### 6. Validation +- **Mode Key:** Must match pattern `^[a-zA-Z0-9_@.-]+$` +- **Required Fields:** `display:name`, `ai:apitype`, `ai:model` +- **Azure Resource Name:** Must match `^[a-z0-9]([a-z0-9-]*[a-z0-9])?$` (1-63 chars) +- **Provider:** Must be one of the valid enum values +- **API Type:** Must be valid enum value +- **Thinking Level:** Must be low/medium/high if present +- **Capabilities:** Must be from valid enum (pdfs, images, tools) + +#### 7. Smart Defaults +When provider changes or model changes: +- Show info about what will be auto-configured +- Display computed endpoint (read-only with info icon) +- Display auto-detected API type (editable with warning) +- Pre-fill common values based on provider + +### UI Components Needed + +#### New Components +```typescript +// Main container +WaveAIVisualContent + +// Left panel +ModeList +├─ ModeListItem (icon, name, provider badge, premium badge, drag handle) +└─ AddModeButton + +// Right panel - viewer +ModeViewer +├─ ModeHeader (name, icon, actions) +├─ DisplaySection (read-only view of display fields) +├─ ProviderSection (read-only view of provider config) +└─ EditButton + +// Right panel - editor +ModeEditor +├─ ProviderSelector (dropdown, only for new modes) +├─ DisplayFieldsForm +├─ ProviderFieldsForm (dynamic based on provider) +│ ├─ WaveProviderForm +│ ├─ OpenAIProviderForm +│ ├─ OpenRouterProviderForm +│ ├─ AzureProviderForm +│ ├─ AzureLegacyProviderForm +│ ├─ GoogleProviderForm +│ └─ CustomProviderForm +└─ ActionButtons (Edit Raw JSON, Delete, Cancel) + +// Modals +RawJSONModal +├─ Title ("Edit Raw JSON: {mode name}") +├─ MonacoEditor (JSON, single mode object) +├─ ValidationErrors (inline display) +└─ Actions (Cancel, Save) + +// Shared components +SecretSelector (dropdown + link to secrets) +InfoTooltip (explains auto-configured fields) +ProviderBadge (visual indicator) +IconPicker (select from available icons) +DragHandle (for reordering modes in list) +``` + +**Drag & Drop for Reordering:** +```typescript +// Reordering updates display:order automatically +function handleModeReorder(draggedKey: string, targetKey: string) { + const modes = parseAIModes(fileContent); + const modesList = Object.entries(modes) + .sort((a, b) => (a[1]["display:order"] || 0) - (b[1]["display:order"] || 0)); + + // Find indices + const draggedIndex = modesList.findIndex(([k]) => k === draggedKey); + const targetIndex = modesList.findIndex(([k]) => k === targetKey); + + // Recalculate display:order for all modes 
+ const newOrder = [...modesList]; + newOrder.splice(draggedIndex, 1); + newOrder.splice(targetIndex, 0, modesList[draggedIndex]); + + // Assign new order values (0, 10, 20, 30...) + newOrder.forEach(([key, mode], index) => { + modes[key] = { ...mode, "display:order": index * 10 }; + }); + + updateFileContent(JSON.stringify(modes, null, 2)); +} +``` + +### Model Extensions (Minimal) + +**No new atoms needed!** Visual editor uses existing `fileContentAtom`: + +```typescript +// Use existing atoms from WaveConfigViewModel: +// - fileContentAtom (contains JSON string) +// - hasEditedAtom (tracks if modified) +// - errorMessageAtom (for errors) + +// Visual editor parses fileContentAtom on render: +function parseAIModes(jsonString: string): Record | null { + try { + return JSON.parse(jsonString); + } catch { + return null; // Show "invalid JSON" error + } +} + +// Updates modify fileContentAtom: +function updateMode(key: string, mode: AIModeConfigType) { + const modes = parseAIModes(globalStore.get(model.fileContentAtom)); + if (!modes) return; + + modes[key] = mode; + const newJson = JSON.stringify(modes, null, 2); + globalStore.set(model.fileContentAtom, newJson); + globalStore.set(model.hasEditedAtom, true); +} + +// Secrets use existing model methods: +// - model.refreshSecrets() - already exists +// - RpcApi.GetSecretsCommand() - check if secret exists +// - RpcApi.SetSecretsCommand() - set secret value +``` + +**Component State (useState):** +```typescript +// In WaveAIVisualContent component: +const [selectedModeKey, setSelectedModeKey] = useState(null); +const [isAddingMode, setIsAddingMode] = useState(false); +const [showSecretModal, setShowSecretModal] = useState(false); +const [secretModalProvider, setSecretModalProvider] = useState(""); +``` + +### Implementation Phases + +#### Phase 1: Foundation & List View +- Parse `fileContentAtom` JSON into modes on render +- Display mode list (left panel, ~300px) + - Built-in modes with 🔒 icon at top + - Custom modes below + - Sort by `display:order` +- Select mode → show in right panel (empty state initially) +- Handle invalid JSON → show error, switch to JSON tab + +#### Phase 2: Built-in Mode Viewer +- Click built-in mode → show read-only details +- Display all fields (display, provider, config) +- "Built-in Mode" badge/banner +- No edit/delete buttons + +#### Phase 3: Custom Mode Editor (Basic) +- Click custom mode → load into editor form +- Display fields (name, icon, order, description) +- Provider field (read-only, badge) +- Model field (text input) +- Save → update `fileContentAtom` JSON +- Cancel → revert to previous selection + +#### Phase 4: Provider-Specific Fields +- Dynamic form based on provider type +- OpenAI: model, thinking level, capabilities +- Azure: resource name, model, thinking, capabilities +- Azure Legacy: resource name, deployment, model +- OpenRouter: model +- Google: model, API type, endpoint +- Custom: everything manual +- Info tooltips for auto-configured fields + +#### Phase 5: Secret Integration +- Check secret existence on mode select +- Display status icon (✅ / ⚠️) +- Click icon → open secret modal +- Secret modal: fixed name (or custom input), password field +- Save secret → immediate RPC call +- Update status icon after save + +#### Phase 6: Add New Mode +- "Add New Mode" button +- Provider dropdown selector +- Auto-generate mode key from provider + model +- Form with provider-specific fields +- Add to modes → update JSON → mark edited +- Select newly created mode + +#### Phase 7: Delete Mode +- Delete 
button for custom modes only
+- Simple confirmation dialog
+- Remove from modes → update JSON → deselect
+
+#### Phase 8: Raw JSON Editor
+- "Edit Raw JSON" button in mode editor (all modes)
+- Modal with Monaco editor for single mode
+- JSON validation before save:
+  - Syntax check with error highlighting
+  - Required fields check (`display:name`, `ai:apitype`, `ai:model`)
+  - Enum validation (provider, apitype, thinkinglevel, capabilities)
+  - Display specific error messages per validation failure
+- Parse validated JSON and update mode in main JSON
+- Useful for edge cases (modes without provider) and power users
+
+#### Phase 9: Drag & Drop Reordering
+- Add drag handle icon to custom mode list items
+- Implement drag & drop functionality:
+  - Visual feedback during drag (opacity, cursor)
+  - Drop target highlighting
+  - Smooth reordering animation
+- On drop:
+  - Recalculate `display:order` for all affected modes
+  - Use spacing (0, 10, 20, 30...) for easy manual adjustment
+  - Update JSON with new order values
+  - Built-in modes always stay at top (negative order values)
+
+#### Phase 10: Polish & UX Refinements
+- Field validation with inline error messages
+- Empty state when no mode selected
+- Icon picker dropdown (Font Awesome icons)
+- Capabilities checkboxes with descriptions
+- Thinking level dropdown with explanations
+- Help tooltips throughout
+- Keyboard shortcuts (e.g., Ctrl/Cmd+E for raw JSON)
+- Loading states for secret checks
+- Smooth transitions and animations
+
+### Technical Considerations
+
+1. **JSON Sync:** Parse/stringify from `fileContentAtom` on every read/write
+2. **Validation:** Validate on blur or before updating JSON
+3. **Built-in Detection:** Check if key starts with `waveai@` → read-only
+4. **Type Safety:** Use `AIModeConfigType` from gotypes.d.ts
+5. **State Management:**
+   - Model atoms for shared state (`fileContentAtom`, `hasEditedAtom`)
+   - Component useState for UI state (selected mode, modals)
+6. **Error Handling:**
+   - Invalid JSON → show message, disable visual editor
+   - Parse errors → gracefully handle, don't crash
+7. **Performance:**
+   - Parse JSON on mount and when `fileContentAtom` changes externally
+   - Debounce frequent updates if needed
+8. **Secret Checks:**
+   - Load secret existence on mode select
+   - Cache results to avoid repeated RPC calls
+
+### Testing Strategy
+
+1. **Unit Tests:** Validation functions, key generation
+2. **Integration Tests:** Form submission, backend sync
+3. **E2E Tests:** Full add/edit/delete flows
+4. **Provider Tests:** Each provider form with various inputs
+5. 
**Edge Cases:** Empty config, invalid JSON, malformed data + +### Documentation Needs + +1. **In-app help:** Tooltips and info bubbles explaining fields +2. **Provider guides:** What each provider needs, where to get API keys +3. **Examples:** Show example configurations for common setups +4. **Troubleshooting:** Common errors and solutions + +## Next Steps + +1. Create detailed mockups/wireframes +2. Implement Phase 1 (basic list view) +3. Add RPC methods if needed for secrets integration +4. Iterate on provider forms +5. Polish and ship + +This design provides a user-friendly way to configure AI modes without directly editing JSON, while still maintaining the power and flexibility of the underlying system. \ No newline at end of file diff --git a/cmd/server/main-server.go b/cmd/server/main-server.go index 30540e351f..7971511159 100644 --- a/cmd/server/main-server.go +++ b/cmd/server/main-server.go @@ -91,6 +91,9 @@ func doShutdown(reason string) { // watch stdin, kill server if stdin is closed func stdinReadWatch() { + defer func() { + panichandler.PanicHandler("stdinReadWatch", recover()) + }() buf := make([]byte, 1024) for { _, err := os.Stdin.Read(buf) @@ -109,6 +112,9 @@ func startConfigWatcher() { } func telemetryLoop() { + defer func() { + panichandler.PanicHandler("telemetryLoop", recover()) + }() var nextSend int64 time.Sleep(InitialTelemetryWait) for { @@ -120,6 +126,42 @@ func telemetryLoop() { } } +func sendNoTelemetryUpdate(telemetryEnabled bool) { + ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelFn() + clientData, err := wstore.DBGetSingleton[*waveobj.Client](ctx) + if err != nil { + log.Printf("telemetry update: error getting client data: %v\n", err) + return + } + if clientData == nil { + log.Printf("telemetry update: client data is nil\n") + return + } + err = wcloud.SendNoTelemetryUpdate(ctx, clientData.OID, !telemetryEnabled) + if err != nil { + log.Printf("[error] sending no-telemetry update: %v\n", err) + return + } +} + +func setupTelemetryConfigHandler() { + watcher := wconfig.GetWatcher() + if watcher == nil { + return + } + currentConfig := watcher.GetFullConfig() + currentTelemetryEnabled := currentConfig.Settings.TelemetryEnabled + + watcher.RegisterUpdateHandler(func(newConfig wconfig.FullConfigType) { + newTelemetryEnabled := newConfig.Settings.TelemetryEnabled + if newTelemetryEnabled != currentTelemetryEnabled { + currentTelemetryEnabled = newTelemetryEnabled + go sendNoTelemetryUpdate(newTelemetryEnabled) + } + }) +} + func backupCleanupLoop() { defer func() { panichandler.PanicHandler("backupCleanupLoop", recover()) @@ -232,6 +274,9 @@ func beforeSendActivityUpdate(ctx context.Context) { } func startupActivityUpdate(firstLaunch bool) { + defer func() { + panichandler.PanicHandler("startupActivityUpdate", recover()) + }() ctx, cancelFn := context.WithTimeout(context.Background(), 5*time.Second) defer cancelFn() activity := wshrpc.ActivityUpdate{Startup: 1} @@ -476,11 +521,17 @@ func main() { maybeStartPprofServer() go stdinReadWatch() go telemetryLoop() + setupTelemetryConfigHandler() go updateTelemetryCountsLoop() go backupCleanupLoop() go startupActivityUpdate(firstLaunch) // must be after startConfigWatcher() blocklogger.InitBlockLogger() - go wavebase.GetSystemSummary() // get this cached (used in AI) + go func() { + defer func() { + panichandler.PanicHandler("GetSystemSummary", recover()) + }() + wavebase.GetSystemSummary() + }() webListener, err := web.MakeTCPListener("web") if err != nil { diff --git 
a/cmd/testai/main-testai.go b/cmd/testai/main-testai.go index 8e8fcdb3eb..04c3ae91f9 100644 --- a/cmd/testai/main-testai.go +++ b/cmd/testai/main-testai.go @@ -166,7 +166,7 @@ func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes. opts := &uctypes.AIOptsType{ APIType: uctypes.APIType_OpenAIChat, APIToken: apiKey, - BaseURL: "https://api.openai.com/v1/chat/completions", + Endpoint: "https://api.openai.com/v1/chat/completions", Model: model, MaxTokens: 4096, ThinkingLevel: uctypes.ThinkingLevelMedium, @@ -216,7 +216,7 @@ func testOpenRouter(ctx context.Context, model, message string, tools []uctypes. opts := &uctypes.AIOptsType{ APIType: uctypes.APIType_OpenAIChat, APIToken: apiKey, - BaseURL: "https://openrouter.ai/api/v1/chat/completions", + Endpoint: "https://openrouter.ai/api/v1/chat/completions", Model: model, MaxTokens: 4096, ThinkingLevel: uctypes.ThinkingLevelMedium, diff --git a/docs/docs/ai-presets.mdx b/docs/docs/ai-presets.mdx index b8c7b34546..6321dae3ad 100644 --- a/docs/docs/ai-presets.mdx +++ b/docs/docs/ai-presets.mdx @@ -1,7 +1,7 @@ --- sidebar_position: 3.6 id: "ai-presets" -title: "AI Presets" +title: "AI Presets (Deprecated)" --- :::warning Deprecation Notice The AI Widget and its presets are being replaced by [Wave AI](./waveai.mdx). Please refer to the Wave AI documentation for the latest AI features and configuration options. diff --git a/docs/docs/connections.mdx b/docs/docs/connections.mdx index 77dc4aacd6..08a8ac2632 100644 --- a/docs/docs/connections.mdx +++ b/docs/docs/connections.mdx @@ -4,6 +4,8 @@ id: "connections" title: "Connections" --- +import { VersionBadge } from "@site/src/components/versionbadge"; + # Connections Wave allows users to connect to various machines and unify them together in a way that preserves the unique behavior of each. At the moment, this extends to SSH remote connections, local WSL connections, and AWS S3 buckets. @@ -156,6 +158,7 @@ In addition to the regular ssh config file, wave also has its own config file to | ssh:batchmode | A boolean indicating if password and passphrase prompts should be skipped. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| | ssh:pubkeyauthentication | A boolean indicating if public key authentication is enabled. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| | ssh:passwordauthentication | A boolean indicating if password authentication is enabled. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored. | +| ssh:passwordsecretname | A string specifying the name of a secret stored in the [secret store](/secrets) to use as the SSH password. When set, this password will be automatically used for password authentication instead of prompting the user. | | ssh:kbdinteractiveauthentication | A boolean indicating if keyboard interactive authentication is enabled. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored. | | ssh:preferredauthentications | A list of strings indicating an ordering of different types of authentications. Each authentication type will be tried in order. This supports `"publickey"`, `"keyboard-interactive"`, and `"password"` as valid types. Other types of authentication are not handled and will be skipped. 
Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| | ssh:addkeystoagent | A boolean indicating if the keys used for a connection should be added to the ssh agent. Can be used to override the value in `~/.ssh/config` or to set it if the ssh config is being ignored.| diff --git a/docs/docs/faq.mdx b/docs/docs/faq.mdx index 74967cbb91..37c714e610 100644 --- a/docs/docs/faq.mdx +++ b/docs/docs/faq.mdx @@ -6,25 +6,6 @@ title: "FAQ" # FAQ -### How do I enable Claude Code support with Shift+Enter? - -Wave supports Claude Code and similar AI coding tools that expect Shift+Enter to send an escape sequence + newline (`\u001b\n`) instead of a regular carriage return. This can be enabled using the `term:shiftenternewline` configuration setting. - -To enable this globally for all terminals: -```bash -wsh setconfig term:shiftenternewline=true -``` - -To enable this for just a specific terminal block: -```bash -wsh setmeta term:shiftenternewline=true -``` - -You can also set this in your [settings.json](./config) file: -```json -"term:shiftenternewline": true -``` - ### How can I see the block numbers? The block numbers will appear when you hold down Ctrl-Shift (and disappear once you release the key combo). @@ -48,87 +29,6 @@ Just remember in JSON, backslashes need to be escaped. So add this to your [sett `wsh` is an internal CLI for extending control over Wave to the command line, you can learn more about it [here](./wsh). To prevent misuse by other applications, `wsh` requires an access token provided by Wave to work and will not function outside of the app. -### How do I make new blocks or splits inherit my shell’s current directory? - -Wave uses a special escape sequence (OSC 7) to track the shell’s working directory and maintain the working directory of new terminal blocks and splits. Wave listens for these sequences to update its `cmd:cwd` metadata. That metadata is copied to new blocks when you: - -- Open a new terminal block (Alt N / Cmd N) -- Split a pane (Cmd D / Cmd Shift D) - -Not all shells emit this escape sequence, so new blocks or splits may start in your home directory instead. To ensure your shell emits the OSC 7 escape sequence, add the following to your shell startup/config file and restart Wave (or source your config). - -#### Bash - -Add to `~/.bashrc` or `~/.bash_profile`: - -```bash -# Emit OSC 7 on each prompt to tell terminal about new working directory -__update_cwd() { - # Only run in interactive shells - [[ $- == *i* ]] || return - # Only run if attached to a terminal - [ -t 1 ] || return - # Redirect to tty so output doesn't show in shell - printf "\033]7;file://%s%s\007" "$HOSTNAME" "${PWD// /%20}" > /dev/tty -} -if [[ -n "$PROMPT_COMMAND" ]]; then - export PROMPT_COMMAND="__update_cwd; $PROMPT_COMMAND" -else - export PROMPT_COMMAND="__update_cwd" -fi -``` - -#### Zsh - -Add to `~/.zshrc`: - -```zsh -# Emit OSC 7 escape on directory change and prompt -function _wave_emit_cwd() { - printf "\033]7;file://%s%s\007" "$HOSTNAME" "${PWD// /%20}" > /dev/tty -} -autoload -U add-zsh-hook -add-zsh-hook chpwd _wave_emit_cwd -add-zsh-hook precmd _wave_emit_cwd -``` - -#### Fish - -> Fish shell (v4.0.0 and later) emits OSC 7 by default—no config required. 
- -For older Fish versions, add to `~/.config/fish/config.fish`: - -```fish -# Emit OSC 7 on each PWD change -function _wave_emit_cwd --on-variable PWD - printf "\033]7;file://%s%s\007" (hostname) (string replace ' ' '%20' $PWD) > /dev/tty -end -``` - -After configuring, open a new block or split (Alt T / Cmd T, Alt N / Cmd N, Cmd D / Cmd Shift D) and verify blocks start in your current directory. - -#### Verifying Current Directory Preservation - -1. Open a Wave terminal block. -2. `cd` into a project folder, e.g. `cd ~/projects/foo`. -3. Right-click on the block's title bar and select "Copy BlockId" to retrieve the block’s ID. -4. Use the copied BlockId to retrieve the block’s metadata: - - ```bash - # Example: replace BLOCK_ID with your actual block reference - wsh getmeta --block BLOCK_ID - ``` - -5. Confirm the output JSON contains a `cmd:cwd` field, for example: - - ```json - { - "cmd:cwd": "/Users/you/projects/foo", - ... - } - ``` - -6. Open a new block or split the pane—both should start in `/Users/you/projects/foo`. ## Why does Wave warn me about ARM64 translation when it launches? diff --git a/docs/docs/secrets.mdx b/docs/docs/secrets.mdx new file mode 100644 index 0000000000..ab6f7902bc --- /dev/null +++ b/docs/docs/secrets.mdx @@ -0,0 +1,147 @@ +--- +sidebar_position: 3.2 +id: "secrets" +title: "Secrets" +--- + +import { VersionBadge } from "@site/src/components/versionbadge"; + +# Secrets + + + +Wave Terminal provides a secure way to store sensitive information like passwords, API keys, and tokens. Secrets are stored encrypted in your system's native keychain (macOS Keychain, Windows Credential Manager, or Linux Secret Service), ensuring your sensitive data remains protected. + +## Why Use Secrets? + +Secrets in Wave Terminal allow you to: + +- **Store SSH passwords** - Automatically authenticate to SSH connections without typing passwords +- **Manage API keys** - Keep API tokens, keys, and credentials secure +- **Share across sessions** - Access your secrets from any terminal block or remote connection +- **Avoid plaintext storage** - Never store sensitive data in configuration files or scripts + +## Opening the Secrets UI + +There are several ways to access the secrets management interface: + +1. **From the widgets bar** (recommended): + - Click the **** settings icon on the widgets bar + - Select **Secrets** from the menu + +2. **From the command line:** + ```bash + wsh secret ui + ``` + + +The secrets UI provides a visual interface to view, add, edit, and delete secrets. + +## Managing Secrets via CLI + +Wave Terminal provides a complete CLI for managing secrets from any terminal block: + +```bash +# List all secret names (not values) +wsh secret list + +# Get a specific secret value +wsh secret get MY_SECRET_NAME + +# Set a secret (format: name=value, no spaces around =) +wsh secret set GITHUB_TOKEN=ghp_xxxxxxxxxx +wsh secret set DB_PASSWORD=super_secure_password + +# Delete a secret +wsh secret delete MY_SECRET_NAME +``` + +## Secret Naming Rules + +Secret names must match the pattern: `^[A-Za-z][A-Za-z0-9_]*$` + +This means: +- Must start with a letter (A-Z or a-z) +- Can only contain letters, numbers, and underscores +- Cannot contain spaces or special characters + +**Valid names:** `MY_SECRET`, `ApiKey`, `ssh_password_1` +**Invalid names:** `123_SECRET`, `my-secret`, `secret name` + +## Using Secrets with SSH Connections + + + +Secrets can be used to automatically provide passwords for SSH connections, eliminating the need to type passwords repeatedly. 
+ +### Configure in connections.json + +Add the `ssh:passwordsecretname` field to your connection configuration: + +```json +{ + "myserver": { + "ssh:hostname": "example.com", + "ssh:user": "myuser", + "ssh:passwordsecretname": "SERVER_PASSWORD" + } +} +``` + +Then store your password as a secret: + +```bash +wsh secret set SERVER_PASSWORD=my_actual_password +``` + +Now when Wave connects to `myserver`, it will automatically use the password from your secret store instead of prompting you. + +### Benefits + +- **Security**: Password stored encrypted in your system keychain +- **Convenience**: No need to type passwords for each connection +- **Flexibility**: Update passwords by changing the secret, not the configuration + +## Security Considerations + +- **Encrypted Storage**: Secrets are stored encrypted in your Wave configuration directory. The encryption key itself is protected by your operating system's secure credential storage (macOS Keychain, Windows Credential Manager, or Linux Secret Service). + +- **No Plaintext**: Secrets are never stored unencrypted in logs or accessible files. + +- **Access Control**: Secrets are only accessible to Wave Terminal. + + +## Storage Backend + +Wave Terminal automatically detects and uses the appropriate secret storage backend for your operating system: + +- **macOS**: Uses the macOS Keychain +- **Windows**: Uses Windows Credential Manager +- **Linux**: Uses the Secret Service API (freedesktop.org specification) + +:::warning Linux Secret Storage +On Linux systems, Wave requires a compatible secret service backend (typically GNOME Keyring or KWallet). These are usually pre-installed with your desktop environment. If no compatible backend is detected, you won't be able to set secrets, and the UI will display a warning. +::: + +## Troubleshooting + +### "No appropriate secret manager found" + +This error occurs on Linux when no compatible secret service backend is available. Install GNOME Keyring or KWallet and ensure the secret service is running. + +### Secret not found + +Ensure the secret name is spelled correctly (names are case-sensitive) and that the secret exists: + +```bash +wsh secret list +``` + +### Permission denied on Linux + +The secret service may require you to unlock your keyring. This typically happens after login. Consult your desktop environment's documentation for keyring management. + +## Related Documentation + +- [Connections](/connections) - Learn about SSH connections and configuration +- [wsh Command Reference](/wsh-reference#secret) - Complete CLI command documentation for secrets \ No newline at end of file diff --git a/docs/docs/waveai-modes.mdx b/docs/docs/waveai-modes.mdx new file mode 100644 index 0000000000..0794a61a3a --- /dev/null +++ b/docs/docs/waveai-modes.mdx @@ -0,0 +1,359 @@ +--- +sidebar_position: 1.6 +id: "waveai-modes" +title: "Wave AI (Local Models)" +--- + +Wave AI supports custom AI modes that allow you to use local models, custom API endpoints, and alternative AI providers. This gives you complete control over which models and providers you use with Wave's AI features. + +## Configuration Overview + +AI modes are configured in `~/.config/waveterm/waveai.json`. + +**To edit using the UI:** +1. Click the settings (gear) icon in the widget bar +2. Select "Settings" from the menu +3. 
Choose "Wave AI Modes" from the settings sidebar + +**Or edit from the command line:** +```bash +wsh editconfig waveai.json +``` + +Each mode defines a complete AI configuration including the model, API endpoint, authentication, and display properties. + +## Provider-Based Configuration + +Wave AI now supports provider-based configuration which automatically applies sensible defaults for common providers. By specifying the `ai:provider` field, you can significantly simplify your configuration as the system will automatically set up endpoints, API types, and secret names. + +### Supported Providers + +- **`openai`** - OpenAI API (automatically configures endpoint and secret name) +- **`openrouter`** - OpenRouter API (automatically configures endpoint and secret name) +- **`google`** - Google AI (Gemini) +- **`azure`** - Azure OpenAI Service (modern API) +- **`azure-legacy`** - Azure OpenAI Service (legacy deployment API) +- **`custom`** - Custom API endpoint (fully manual configuration) + +### Supported API Types + +Wave AI supports two OpenAI-compatible API types: + +- **`openai-chat`**: Uses the `/v1/chat/completions` endpoint (most common) +- **`openai-responses`**: Uses the `/v1/responses` endpoint (modern API for GPT-5+ models) + +## Configuration Structure + +### Minimal Configuration (with Provider) + +```json +{ + "mode-key": { + "display:name": "Display Name", + "ai:provider": "openrouter", + "ai:model": "qwen/qwen-2.5-coder-32b-instruct" + } +} +``` + +### Full Configuration (all fields) + +```json +{ + "mode-key": { + "display:name": "Display Name", + "display:order": 1, + "display:icon": "icon-name", + "display:description": "Full description", + "ai:provider": "custom", + "ai:apitype": "openai-chat", + "ai:model": "model-name", + "ai:thinkinglevel": "medium", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:azureapiversion": "v1", + "ai:apitoken": "your-token", + "ai:apitokensecretname": "PROVIDER_KEY", + "ai:azureresourcename": "your-resource", + "ai:azuredeployment": "your-deployment", + "ai:capabilities": ["tools", "images", "pdfs"] + } +} +``` + +### Field Reference + +| Field | Required | Description | +|-------|----------|-------------| +| `display:name` | Yes | Name shown in the AI mode selector | +| `display:order` | No | Sort order in the selector (lower numbers first) | +| `display:icon` | No | Icon identifier for the mode | +| `display:description` | No | Full description of the mode | +| `ai:provider` | No | Provider preset: `openai`, `openrouter`, `google`, `azure`, `azure-legacy`, `custom` | +| `ai:apitype` | No | API type: `openai-chat` or `openai-responses` (defaults to `openai-chat` if not specified) | +| `ai:model` | No | Model identifier (required for most providers) | +| `ai:thinkinglevel` | No | Thinking level: `low`, `medium`, or `high` | +| `ai:endpoint` | No | Full API endpoint URL (auto-set by provider when available) | +| `ai:azureapiversion` | No | Azure API version (for `azure-legacy` provider, defaults to `2025-04-01-preview`) | +| `ai:apitoken` | No | API key/token (not recommended - use secrets instead) | +| `ai:apitokensecretname` | No | Name of secret containing API token (auto-set by provider) | +| `ai:azureresourcename` | No | Azure resource name (for Azure providers) | +| `ai:azuredeployment` | No | Azure deployment name (for `azure-legacy` provider) | +| `ai:capabilities` | No | Array of supported capabilities: `"tools"`, `"images"`, `"pdfs"` | +| `waveai:cloud` | No | Internal - for Wave Cloud AI configuration only | +| 
`waveai:premium` | No | Internal - for Wave Cloud AI configuration only |
+
+### AI Capabilities
+
+The `ai:capabilities` field specifies what features the AI mode supports:
+
+- **`tools`** - Enables AI tool usage for file reading/writing, shell integration, and widget interaction
+- **`images`** - Allows image attachments in chat (model can view uploaded images)
+- **`pdfs`** - Allows PDF file attachments in chat (model can read PDF content)
+
+Most models support `tools` and can benefit from it. Vision-capable models should include `images`. Not all models support PDFs, so only include `pdfs` if your model can process them.
+
+## Local Model Examples
+
+### Ollama
+
+[Ollama](https://ollama.ai) provides an OpenAI-compatible API for running models locally:
+
+```json
+{
+  "ollama-llama": {
+    "display:name": "Ollama - Llama 3.3",
+    "display:order": 1,
+    "display:icon": "llama",
+    "display:description": "Local Llama 3.3 70B model via Ollama",
+    "ai:apitype": "openai-chat",
+    "ai:model": "llama3.3:70b",
+    "ai:thinkinglevel": "medium",
+    "ai:endpoint": "http://localhost:11434/v1/chat/completions",
+    "ai:apitoken": "ollama"
+  }
+}
+```
+
+:::tip
+The `ai:apitoken` field is required but Ollama ignores it - you can set it to any value like `"ollama"`.
+:::
+
+### LM Studio
+
+[LM Studio](https://lmstudio.ai) provides a local server that can run various models:
+
+```json
+{
+  "lmstudio-qwen": {
+    "display:name": "LM Studio - Qwen",
+    "display:order": 2,
+    "display:icon": "server",
+    "display:description": "Local Qwen model via LM Studio",
+    "ai:apitype": "openai-chat",
+    "ai:model": "qwen/qwen-2.5-coder-32b-instruct",
+    "ai:thinkinglevel": "medium",
+    "ai:endpoint": "http://localhost:1234/v1/chat/completions",
+    "ai:apitoken": "not-needed"
+  }
+}
+```
+
+### Jan
+
+[Jan](https://jan.ai) is another local AI runtime with OpenAI API compatibility:
+
+```json
+{
+  "jan-local": {
+    "display:name": "Jan",
+    "display:order": 3,
+    "display:icon": "server",
+    "display:description": "Local model via Jan",
+    "ai:apitype": "openai-chat",
+    "ai:model": "your-model-name",
+    "ai:thinkinglevel": "medium",
+    "ai:endpoint": "http://localhost:1337/v1/chat/completions",
+    "ai:apitoken": "not-needed"
+  }
+}
+```
+
+## Cloud Provider Examples
+
+### OpenAI
+
+Using the `openai` provider automatically configures the endpoint and secret name:
+
+```json
+{
+  "openai-gpt4o": {
+    "display:name": "GPT-4o",
+    "ai:provider": "openai",
+    "ai:model": "gpt-4o"
+  }
+}
+```
+
+The provider automatically sets:
+- `ai:endpoint` to `https://api.openai.com/v1/chat/completions`
+- `ai:apitype` to `openai-chat` (or `openai-responses` for GPT-5+ models)
+- `ai:apitokensecretname` to `OPENAI_KEY` (store your OpenAI API key with this name)
+
+For newer models like GPT-4.1 or GPT-5, the API type is automatically determined:
+
+```json
+{
+  "openai-gpt41": {
+    "display:name": "GPT-4.1",
+    "ai:provider": "openai",
+    "ai:model": "gpt-4.1"
+  }
+}
+```
+
+### OpenRouter
+
+[OpenRouter](https://openrouter.ai) provides access to multiple AI models. 
Using the `openrouter` provider simplifies configuration: + +```json +{ + "openrouter-qwen": { + "display:name": "OpenRouter - Qwen", + "ai:provider": "openrouter", + "ai:model": "qwen/qwen-2.5-coder-32b-instruct" + } +} +``` + +The provider automatically sets: +- `ai:endpoint` to `https://openrouter.ai/api/v1/chat/completions` +- `ai:apitype` to `openai-chat` +- `ai:apitokensecretname` to `OPENROUTER_KEY` (store your OpenRouter API key with this name) + +### Azure OpenAI (Modern API) + +For the modern Azure OpenAI API, use the `azure` provider: + +```json +{ + "azure-gpt4": { + "display:name": "Azure GPT-4", + "ai:provider": "azure", + "ai:model": "gpt-4", + "ai:azureresourcename": "your-resource-name" + } +} +``` + +The provider automatically sets: +- `ai:endpoint` to `https://your-resource-name.openai.azure.com/openai/v1/chat/completions` (or `/responses` for newer models) +- `ai:apitype` based on the model +- `ai:apitokensecretname` to `AZURE_OPENAI_KEY` (store your Azure OpenAI key with this name) + +### Azure OpenAI (Legacy Deployment API) + +For legacy Azure deployments, use the `azure-legacy` provider: + +```json +{ + "azure-legacy-gpt4": { + "display:name": "Azure GPT-4 (Legacy)", + "ai:provider": "azure-legacy", + "ai:azureresourcename": "your-resource-name", + "ai:azuredeployment": "your-deployment-name" + } +} +``` + +The provider automatically constructs the full endpoint URL and sets the API version (defaults to `2025-04-01-preview`). You can override the API version with `ai:azureapiversion` if needed. + +## Using Secrets for API Keys + +Instead of storing API keys directly in the configuration, you should use Wave's secret store to keep your credentials secure. Secrets are stored encrypted using your system's native keychain. + +### Storing an API Key + +**Using the Secrets UI (recommended):** +1. Click the settings (gear) icon in the widget bar +2. Select "Secrets" from the menu +3. Click "Add New Secret" +4. Enter the secret name (e.g., `OPENAI_API_KEY`) and your API key +5. Click "Save" + +**Or from the command line:** +```bash +wsh secret set OPENAI_KEY=sk-xxxxxxxxxxxxxxxx +wsh secret set OPENROUTER_KEY=sk-xxxxxxxxxxxxxxxx +``` + +### Referencing the Secret + +When using providers like `openai` or `openrouter`, the secret name is automatically set. Just ensure the secret exists with the correct name: + +```json +{ + "my-openai-mode": { + "display:name": "OpenAI GPT-4o", + "ai:provider": "openai", + "ai:model": "gpt-4o" + } +} +``` + +The `openai` provider automatically looks for the `OPENAI_KEY` secret. See the [Secrets documentation](./secrets.mdx) for more information on managing secrets securely in Wave. + +## Multiple Modes Example + +You can define multiple AI modes and switch between them easily: + +```json +{ + "ollama-llama": { + "display:name": "Ollama - Llama 3.3", + "display:order": 1, + "ai:model": "llama3.3:70b", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:apitoken": "ollama" + }, + "ollama-codellama": { + "display:name": "Ollama - CodeLlama", + "display:order": 2, + "ai:model": "codellama:34b", + "ai:endpoint": "http://localhost:11434/v1/chat/completions", + "ai:apitoken": "ollama" + }, + "openai-gpt4o": { + "display:name": "GPT-4o", + "display:order": 10, + "ai:provider": "openai", + "ai:model": "gpt-4o" + } +} +``` + +## Troubleshooting + +### Connection Issues + +If Wave can't connect to your model server: + +1. 
**For cloud providers with `ai:provider` set**: Ensure you have the correct secret stored (e.g., `OPENAI_KEY`, `OPENROUTER_KEY`) +2. **For local/custom endpoints**: Verify the server is running (`curl http://localhost:11434/v1/models` for Ollama) +3. Check the `ai:endpoint` is the complete endpoint URL including the path (e.g., `http://localhost:11434/v1/chat/completions`) +4. Verify the `ai:apitype` matches your server's API (defaults are usually correct when using providers) +5. Check firewall settings if using a non-localhost address + +### Model Not Found + +If you get "model not found" errors: + +1. Verify the model name matches exactly what your server expects +2. For Ollama, use `ollama list` to see available models +3. Some servers require prefixes or specific naming formats + +### API Type Selection + +- The API type defaults to `openai-chat` if not specified, which works for most providers +- Use `openai-chat` for Ollama, LM Studio, custom endpoints, and most cloud providers +- Use `openai-responses` for newer OpenAI models (GPT-5+) or when your provider specifically requires it +- Provider presets automatically set the correct API type when needed diff --git a/docs/src/components/versionbadge.css b/docs/src/components/versionbadge.css new file mode 100644 index 0000000000..4883d04aa0 --- /dev/null +++ b/docs/src/components/versionbadge.css @@ -0,0 +1,18 @@ +.version-badge { + display: inline-block; + padding: 0.125rem 0.5rem; + margin-left: 0.25rem; + font-size: 0.75rem; + font-weight: 600; + line-height: 1.5; + border-radius: 0.25rem; + background-color: var(--ifm-color-primary-lightest); + color: var(--ifm-background-color); + vertical-align: middle; + white-space: nowrap; +} + +[data-theme="dark"] .version-badge { + background-color: var(--ifm-color-primary-dark); + color: var(--ifm-background-color); +} \ No newline at end of file diff --git a/docs/src/components/versionbadge.tsx b/docs/src/components/versionbadge.tsx new file mode 100644 index 0000000000..36903ce8bd --- /dev/null +++ b/docs/src/components/versionbadge.tsx @@ -0,0 +1,9 @@ +import "./versionbadge.css"; + +interface VersionBadgeProps { + version: string; +} + +export function VersionBadge({ version }: VersionBadgeProps) { + return {version}; +} \ No newline at end of file diff --git a/emain/emain-menu.ts b/emain/emain-menu.ts index 36efa8ec65..84e9303220 100644 --- a/emain/emain-menu.ts +++ b/emain/emain-menu.ts @@ -412,6 +412,7 @@ function convertMenuDefArrToMenu( wc.send("contextmenu-click", menuDef.id); }, checked: menuDef.checked, + enabled: menuDef.enabled, }; if (menuDef.submenu != null) { menuItemTemplate.submenu = convertMenuDefArrToMenu(webContents, menuDef.submenu); diff --git a/frontend/app/aipanel/ai-utils.ts b/frontend/app/aipanel/ai-utils.ts index 1477db6af5..fce9a7194d 100644 --- a/frontend/app/aipanel/ai-utils.ts +++ b/frontend/app/aipanel/ai-utils.ts @@ -1,6 +1,8 @@ // Copyright 2025, Command Line Inc. 
// SPDX-License-Identifier: Apache-2.0 +import { sortByDisplayOrder } from "@/util/util"; + const TextFileLimit = 200 * 1024; // 200KB const PdfLimit = 5 * 1024 * 1024; // 5MB const ImageLimit = 10 * 1024 * 1024; // 10MB @@ -529,3 +531,44 @@ export const createImagePreview = async (file: File): Promise => img.src = url; }); }; + + +/** + * Filter and organize AI mode configs into Wave and custom provider groups + * Returns organized configs that should be displayed based on settings and premium status + */ +export interface FilteredAIModeConfigs { + waveProviderConfigs: Array<{ mode: string } & AIModeConfigType>; + otherProviderConfigs: Array<{ mode: string } & AIModeConfigType>; + shouldShowCloudModes: boolean; +} + +export const getFilteredAIModeConfigs = ( + aiModeConfigs: Record, + showCloudModes: boolean, + inBuilder: boolean, + hasPremium: boolean +): FilteredAIModeConfigs => { + const hideQuick = inBuilder && hasPremium; + + const allConfigs = Object.entries(aiModeConfigs) + .map(([mode, config]) => ({ mode, ...config })) + .filter((config) => !(hideQuick && config.mode === "waveai@quick")); + + const otherProviderConfigs = allConfigs + .filter((config) => config["ai:provider"] !== "wave") + .sort(sortByDisplayOrder); + + const hasCustomModels = otherProviderConfigs.length > 0; + const shouldShowCloudModes = showCloudModes || !hasCustomModels; + + const waveProviderConfigs = shouldShowCloudModes + ? allConfigs.filter((config) => config["ai:provider"] === "wave").sort(sortByDisplayOrder) + : []; + + return { + waveProviderConfigs, + otherProviderConfigs, + shouldShowCloudModes, + }; +}; diff --git a/frontend/app/aipanel/aimessage.tsx b/frontend/app/aipanel/aimessage.tsx index e6fb70ce11..1c9dea2b66 100644 --- a/frontend/app/aipanel/aimessage.tsx +++ b/frontend/app/aipanel/aimessage.tsx @@ -223,7 +223,7 @@ export const AIMessage = memo(({ message, isStreaming }: AIMessageProps) => { className={cn( "px-2 rounded-lg [&>*:first-child]:!mt-0", message.role === "user" - ? "py-2 bg-accent-800 text-white max-w-[calc(100%-50px)] @w450:max-w-[calc(100%-105px)]" + ? "py-2 bg-accent-800 text-white max-w-[calc(100%-50px)]" : "min-w-[min(100%,500px)]" )} > diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx index d5ec9d3063..a30bc0136e 100644 --- a/frontend/app/aipanel/aimode.tsx +++ b/frontend/app/aipanel/aimode.tsx @@ -1,10 +1,11 @@ // Copyright 2025, Command Line Inc. // SPDX-License-Identifier: Apache-2.0 -import { atoms } from "@/app/store/global"; -import { cn, makeIconClass } from "@/util/util"; +import { atoms, createBlock, getSettingsKeyAtom } from "@/app/store/global"; +import { cn, fireAndForget, makeIconClass } from "@/util/util"; import { useAtomValue } from "jotai"; import { memo, useRef, useState } from "react"; +import { getFilteredAIModeConfigs } from "./ai-utils"; import { WaveAIModel } from "./waveai-model"; export const AIModeDropdown = memo(() => { @@ -12,20 +13,21 @@ export const AIModeDropdown = memo(() => { const aiMode = useAtomValue(model.currentAIMode); const aiModeConfigs = useAtomValue(model.aiModeConfigs); const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom); + const showCloudModes = useAtomValue(getSettingsKeyAtom("waveai:showcloudmodes")); + const defaultMode = useAtomValue(getSettingsKeyAtom("waveai:defaultmode")) ?? 
"waveai@balanced"; const [isOpen, setIsOpen] = useState(false); const dropdownRef = useRef(null); const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; - const hideQuick = model.inBuilder && hasPremium; - const sortedConfigs = Object.entries(aiModeConfigs) - .map(([mode, config]) => ({ mode, ...config })) - .sort((a, b) => { - const orderDiff = (a["display:order"] || 0) - (b["display:order"] || 0); - if (orderDiff !== 0) return orderDiff; - return (a["display:name"] || "").localeCompare(b["display:name"] || ""); - }) - .filter((config) => !(hideQuick && config.mode === "waveai@quick")); + const { waveProviderConfigs, otherProviderConfigs } = getFilteredAIModeConfigs( + aiModeConfigs, + showCloudModes, + model.inBuilder, + hasPremium + ); + + const hasBothModeTypes = waveProviderConfigs.length > 0 && otherProviderConfigs.length > 0; const handleSelect = (mode: string) => { const config = aiModeConfigs[mode]; @@ -37,13 +39,13 @@ export const AIModeDropdown = memo(() => { setIsOpen(false); }; - let currentMode = aiMode || "waveai@balanced"; + let currentMode = aiMode || defaultMode; const currentConfig = aiModeConfigs[currentMode]; if (currentConfig) { if (!hasPremium && currentConfig["waveai:premium"]) { currentMode = "waveai@quick"; } - if (hideQuick && currentMode === "waveai@quick") { + if (model.inBuilder && hasPremium && currentMode === "waveai@quick") { currentMode = "waveai@balanced"; } } @@ -53,7 +55,7 @@ export const AIModeDropdown = memo(() => { "display:icon": "question", }; - return ( + return (
+ ); + })} + {hasBothModeTypes && ( +
+ )} + {hasBothModeTypes && ( +
+ Custom +
+ )} + {otherProviderConfigs.map((config, index) => { + const isFirst = index === 0 && !hasBothModeTypes; + const isLast = index === otherProviderConfigs.length - 1; + const isDisabled = !hasPremium && config["waveai:premium"]; + const isSelected = currentMode === config.mode; + return ( + ); })} +
+
)} diff --git a/frontend/app/aipanel/aipanel-contextmenu.ts b/frontend/app/aipanel/aipanel-contextmenu.ts index 05060b5e64..2c4766f90e 100644 --- a/frontend/app/aipanel/aipanel-contextmenu.ts +++ b/frontend/app/aipanel/aipanel-contextmenu.ts @@ -1,9 +1,10 @@ // Copyright 2025, Command Line Inc. // SPDX-License-Identifier: Apache-2.0 +import { getFilteredAIModeConfigs } from "@/app/aipanel/ai-utils"; import { waveAIHasSelection } from "@/app/aipanel/waveai-focus-utils"; import { ContextMenuModel } from "@/app/store/contextmenu"; -import { atoms, isDev } from "@/app/store/global"; +import { atoms, getSettingsKeyAtom, isDev } from "@/app/store/global"; import { globalStore } from "@/app/store/jotaiStore"; import { RpcApi } from "@/app/store/wshclientapi"; import { TabRpcClient } from "@/app/store/wshrpcutil"; @@ -41,49 +42,76 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom); const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; + const aiModeConfigs = globalStore.get(model.aiModeConfigs); + const showCloudModes = globalStore.get(getSettingsKeyAtom("waveai:showcloudmodes")); const currentAIMode = rtInfo?.["waveai:mode"] ?? (hasPremium ? "waveai@balanced" : "waveai@quick"); const defaultTokens = model.inBuilder ? 24576 : 4096; const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens; - const aiModeSubmenu: ContextMenuItem[] = [ - { - label: "Quick (gpt-5-mini)", - type: "checkbox", - checked: currentAIMode === "waveai@quick", - click: () => { - RpcApi.SetRTInfoCommand(TabRpcClient, { - oref: model.orefContext, - data: { "waveai:mode": "waveai@quick" }, - }); - }, - }, - { - label: hasPremium ? "Balanced (gpt-5.1, low thinking)" : "Balanced (premium)", - type: "checkbox", - checked: currentAIMode === "waveai@balanced", - enabled: hasPremium, - click: () => { - if (!hasPremium) return; - RpcApi.SetRTInfoCommand(TabRpcClient, { - oref: model.orefContext, - data: { "waveai:mode": "waveai@balanced" }, - }); - }, - }, - { - label: hasPremium ? 
"Deep (gpt-5.1, full thinking)" : "Deep (premium)", - type: "checkbox", - checked: currentAIMode === "waveai@deep", - enabled: hasPremium, - click: () => { - if (!hasPremium) return; - RpcApi.SetRTInfoCommand(TabRpcClient, { - oref: model.orefContext, - data: { "waveai:mode": "waveai@deep" }, - }); - }, - }, - ]; + const { waveProviderConfigs, otherProviderConfigs } = getFilteredAIModeConfigs( + aiModeConfigs, + showCloudModes, + model.inBuilder, + hasPremium + ); + + const aiModeSubmenu: ContextMenuItem[] = []; + + if (waveProviderConfigs.length > 0) { + aiModeSubmenu.push({ + label: "Wave AI Modes", + type: "header", + enabled: false, + }); + + waveProviderConfigs.forEach(({ mode, ...config }) => { + const isPremium = config["waveai:premium"] === true; + const isEnabled = !isPremium || hasPremium; + aiModeSubmenu.push({ + label: config["display:name"] || mode, + type: "checkbox", + checked: currentAIMode === mode, + enabled: isEnabled, + click: () => { + if (!isEnabled) return; + RpcApi.SetRTInfoCommand(TabRpcClient, { + oref: model.orefContext, + data: { "waveai:mode": mode }, + }); + }, + }); + }); + } + + if (otherProviderConfigs.length > 0) { + if (waveProviderConfigs.length > 0) { + aiModeSubmenu.push({ type: "separator" }); + } + + aiModeSubmenu.push({ + label: "Custom Modes", + type: "header", + enabled: false, + }); + + otherProviderConfigs.forEach(({ mode, ...config }) => { + const isPremium = config["waveai:premium"] === true; + const isEnabled = !isPremium || hasPremium; + aiModeSubmenu.push({ + label: config["display:name"] || mode, + type: "checkbox", + checked: currentAIMode === mode, + enabled: isEnabled, + click: () => { + if (!isEnabled) return; + RpcApi.SetRTInfoCommand(TabRpcClient, { + oref: model.orefContext, + data: { "waveai:mode": mode }, + }); + }, + }); + }); + } const maxTokensSubmenu: ContextMenuItem[] = []; diff --git a/frontend/app/aipanel/aipanelmessages.tsx b/frontend/app/aipanel/aipanelmessages.tsx index 3d3ae0d912..1c55f1f071 100644 --- a/frontend/app/aipanel/aipanelmessages.tsx +++ b/frontend/app/aipanel/aipanelmessages.tsx @@ -58,10 +58,10 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane return (
-
+
{messages.map((message, index) => { diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx index 34e11ec5ce..270796c4e4 100644 --- a/frontend/app/aipanel/waveai-model.tsx +++ b/frontend/app/aipanel/waveai-model.tsx @@ -8,7 +8,7 @@ import { WaveUIMessagePart, } from "@/app/aipanel/aitypes"; import { FocusManager } from "@/app/store/focusManager"; -import { atoms, createBlock, getOrefMetaKeyAtom } from "@/app/store/global"; +import { atoms, createBlock, getOrefMetaKeyAtom, getSettingsKeyAtom } from "@/app/store/global"; import { globalStore } from "@/app/store/jotaiStore"; import * as WOS from "@/app/store/wos"; import { RpcApi } from "@/app/store/wshclientapi"; @@ -77,6 +77,8 @@ export class WaveAIModel { private constructor(orefContext: ORef, inBuilder: boolean) { this.orefContext = orefContext; this.inBuilder = inBuilder; + const defaultMode = globalStore.get(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced"; + this.currentAIMode = jotai.atom(defaultMode); this.chatId = jotai.atom(null) as jotai.PrimitiveAtom; this.modelAtom = jotai.atom((get) => { @@ -365,7 +367,8 @@ export class WaveAIModel { } globalStore.set(this.chatId, chatIdValue); - const aiModeValue = rtInfo?.["waveai:mode"] ?? "waveai@balanced"; + const defaultMode = globalStore.get(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced"; + const aiModeValue = rtInfo?.["waveai:mode"] ?? defaultMode; globalStore.set(this.currentAIMode, aiModeValue); try { diff --git a/frontend/app/modals/conntypeahead.tsx b/frontend/app/modals/conntypeahead.tsx index bee43cb03d..b5e21c2257 100644 --- a/frontend/app/modals/conntypeahead.tsx +++ b/frontend/app/modals/conntypeahead.tsx @@ -272,11 +272,10 @@ function getConnectionsEditItem( onSelect: () => { util.fireAndForget(async () => { globalStore.set(changeConnModalAtom, false); - const path = `${getApi().getConfigDir()}/connections.json`; const blockDef: BlockDef = { meta: { - view: "preview", - file: path, + view: "waveconfig", + file: "connections.json", }, }; await createBlock(blockDef, false, true); diff --git a/frontend/app/modals/userinputmodal.scss b/frontend/app/modals/userinputmodal.scss deleted file mode 100644 index c630422cbc..0000000000 --- a/frontend/app/modals/userinputmodal.scss +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2024, Command Line Inc. 
-// SPDX-License-Identifier: Apache-2.0 - -.userinput-header { - font-weight: bold; - color: var(--main-text-color); - padding-bottom: 10px; -} - -.userinput-body { - display: flex; - flex-direction: column; - justify-content: space-between; - gap: 1rem; - margin: 0 1rem 1rem 1rem; - max-width: 500px; - - font: var(--fixed-font); - color: var(--main-text-color); - - .userinput-markdown { - color: inherit; - } - - .userinput-text { - } - - .userinput-inputbox { - resize: none; - background-color: var(--panel-bg-color); - border-radius: 6px; - margin: 0; - border: var(--border-color); - padding: 5px 0 5px 16px; - min-height: 30px; - color: inherit; - - &:hover { - cursor: text; - } - - &:focus { - outline-color: var(--accent-color); - } - } - - .userinput-checkbox-container { - display: flex; - flex-direction: column; - gap: 6px; - - .userinput-checkbox-row { - display: flex; - align-items: center; - gap: 6px; - - .userinput-checkbox { - accent-color: var(--accent-color); - } - } - } -} diff --git a/frontend/app/modals/userinputmodal.tsx b/frontend/app/modals/userinputmodal.tsx index d277a73236..fc97a185ee 100644 --- a/frontend/app/modals/userinputmodal.tsx +++ b/frontend/app/modals/userinputmodal.tsx @@ -8,7 +8,6 @@ import * as keyutil from "@/util/keyutil"; import { fireAndForget } from "@/util/util"; import { useCallback, useEffect, useMemo, useRef, useState } from "react"; import { UserInputService } from "../store/services"; -import "./userinputmodal.scss"; const UserInputModal = (userInputRequest: UserInputRequest) => { const [responseText, setResponseText] = useState(""); @@ -68,21 +67,22 @@ const UserInputModal = (userInputRequest: UserInputRequest) => { (waveEvent: WaveKeyboardEvent): boolean => { if (keyutil.checkKeyPressed(waveEvent, "Escape")) { handleSendErrResponse(); - return; + return true; } if (keyutil.checkKeyPressed(waveEvent, "Enter")) { handleSubmit(); return true; } + return false; }, [handleSendErrResponse, handleSubmit] ); const queryText = useMemo(() => { if (userInputRequest.markdown) { - return ; + return ; } - return {userInputRequest.querytext}; + return {userInputRequest.querytext}; }, [userInputRequest.markdown, userInputRequest.querytext]); const inputBox = useMemo(() => { @@ -95,7 +95,7 @@ const UserInputModal = (userInputRequest: UserInputRequest) => { onChange={(e) => setResponseText(e.target.value)} value={responseText} maxLength={400} - className="userinput-inputbox" + className="resize-none bg-panel rounded-md border border-border py-1.5 pl-4 min-h-[30px] text-inherit cursor-text focus:ring-2 focus:ring-accent focus:outline-none" autoFocus={true} onKeyDown={(e) => keyutil.keydownWrapper(handleKeyDown)(e)} /> @@ -107,15 +107,15 @@ const UserInputModal = (userInputRequest: UserInputRequest) => { return <>; } return ( -
-
+
+
- +
); @@ -148,14 +148,15 @@ const UserInputModal = (userInputRequest: UserInputRequest) => { return ( handleSubmit()} onCancel={() => handleNegativeResponse()} onClose={() => handleSendErrResponse()} okLabel={userInputRequest.oklabel} cancelLabel={userInputRequest.cancellabel} > -
{userInputRequest.title + ` (${countdown}s)`}
-
+
{userInputRequest.title + ` (${countdown}s)`}
+
{queryText} {inputBox} {optionalCheckbox} diff --git a/frontend/app/view/waveconfig/secretscontent.tsx b/frontend/app/view/waveconfig/secretscontent.tsx new file mode 100644 index 0000000000..8be44f7756 --- /dev/null +++ b/frontend/app/view/waveconfig/secretscontent.tsx @@ -0,0 +1,389 @@ +// Copyright 2025, Command Line Inc. +// SPDX-License-Identifier: Apache-2.0 + +import { SecretNameRegex, type WaveConfigViewModel } from "@/app/view/waveconfig/waveconfig-model"; +import { cn } from "@/util/util"; +import { useAtomValue, useSetAtom } from "jotai"; +import { memo } from "react"; + +interface ErrorDisplayProps { + message: string; + variant?: "error" | "warning"; +} + +const ErrorDisplay = memo(({ message, variant = "error" }: ErrorDisplayProps) => { + const icon = variant === "error" ? "fa-circle-exclamation" : "fa-triangle-exclamation"; + const baseClasses = "flex items-center gap-2 p-4 border rounded-lg"; + const variantClasses = + variant === "error" + ? "bg-red-500/10 border-red-500/20 text-red-400" + : "bg-yellow-500/10 border-yellow-500/20 text-yellow-400"; + + return ( +
+ + {message} +
+ ); +}); +ErrorDisplay.displayName = "ErrorDisplay"; + +const LoadingSpinner = memo(({ message }: { message: string }) => { + return ( +
+ + {message} +
+ ); +}); +LoadingSpinner.displayName = "LoadingSpinner"; + +const EmptyState = memo(({ onAddSecret }: { onAddSecret: () => void }) => { + return ( +
+ +

No Secrets

+

Add a secret to get started

+ +
+ ); +}); +EmptyState.displayName = "EmptyState"; + +const CLIInfoBubble = memo(() => { + return ( +
+
+ +
CLI Access
+
+
+ wsh secret list +
+ wsh secret get [name] +
+ wsh secret set [name]=[value] +
+
+ ); +}); +CLIInfoBubble.displayName = "CLIInfoBubble"; + +interface SecretListViewProps { + secretNames: string[]; + onSelectSecret: (name: string) => void; + onAddSecret: () => void; +} + +const SecretListView = memo(({ secretNames, onSelectSecret, onAddSecret }: SecretListViewProps) => { + return ( +
+
+ {secretNames.map((name) => ( +
onSelectSecret(name)} + > + + {name} + +
+ ))} +
+ + Add New Secret +
+
+ +
+ ); +}); +SecretListView.displayName = "SecretListView"; + +interface AddSecretFormProps { + newSecretName: string; + newSecretValue: string; + isLoading: boolean; + onNameChange: (name: string) => void; + onValueChange: (value: string) => void; + onCancel: () => void; + onSubmit: () => void; +} + +const AddSecretForm = memo( + ({ + newSecretName, + newSecretValue, + isLoading, + onNameChange, + onValueChange, + onCancel, + onSubmit, + }: AddSecretFormProps) => { + const isNameInvalid = newSecretName !== "" && !SecretNameRegex.test(newSecretName); + + return ( +
+

Add New Secret

+
+ + onNameChange(e.target.value)} + placeholder="MY_SECRET_NAME" + disabled={isLoading} + /> +
+ Must start with a letter and contain only letters, numbers, and underscores +
+
+
+ +