From cca274bc4a9132a053963326bf953419112b9e30 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 14:50:52 -0800 Subject: [PATCH 01/31] first cut at a text-only backend for the /v1/chat/completions api --- cmd/testai/main-testai.go | 70 ++++++- pkg/aiusechat/aiutil/aiutil.go | 9 + .../openaicomp/openaicomp-backend.go | 193 ++++++++++++++++++ .../openaicomp/openaicomp-convertmessage.go | 147 +++++++++++++ pkg/aiusechat/openaicomp/openaicomp-types.go | 118 +++++++++++ pkg/aiusechat/usechat-backend.go | 41 ++++ pkg/aiusechat/usechat.go | 9 +- 7 files changed, 581 insertions(+), 6 deletions(-) create mode 100644 pkg/aiusechat/openaicomp/openaicomp-backend.go create mode 100644 pkg/aiusechat/openaicomp/openaicomp-convertmessage.go create mode 100644 pkg/aiusechat/openaicomp/openaicomp-types.go diff --git a/cmd/testai/main-testai.go b/cmd/testai/main-testai.go index eace7ca61a..a6f1a98ff8 100644 --- a/cmd/testai/main-testai.go +++ b/cmd/testai/main-testai.go @@ -155,6 +155,56 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool } } +func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { + apiKey := os.Getenv("OPENAI_APIKEY") + if apiKey == "" { + fmt.Println("Error: OPENAI_APIKEY environment variable not set") + os.Exit(1) + } + + opts := &uctypes.AIOptsType{ + APIType: aiusechat.APIType_OpenAIComp, + APIToken: apiKey, + BaseURL: "https://api.openai.com/v1/chat/completions", + Model: model, + MaxTokens: 4096, + ThinkingLevel: uctypes.ThinkingLevelMedium, + } + + chatID := uuid.New().String() + + aiMessage := &uctypes.AIMessage{ + MessageId: uuid.New().String(), + Parts: []uctypes.AIMessagePart{ + { + Type: uctypes.AIMessagePartTypeText, + Text: message, + }, + }, + } + + fmt.Printf("Testing OpenAI Completions API with WaveAIPostMessageWrap, model: %s\n", model) + fmt.Printf("Message: %s\n", message) + fmt.Printf("Chat ID: %s\n", chatID) + fmt.Println("---") + + testWriter := &TestResponseWriter{} + sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx) + defer sseHandler.Close() + + chatOpts := uctypes.WaveChatOpts{ + ChatId: chatID, + ClientId: uuid.New().String(), + Config: *opts, + Tools: tools, + SystemPrompt: []string{"You are a helpful assistant. Be concise and clear in your responses."}, + } + err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts) + if err != nil { + fmt.Printf("OpenAI Completions API streaming error: %v\n", err) + } +} + func testAnthropic(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { apiKey := os.Getenv("ANTHROPIC_APIKEY") if apiKey == "" { @@ -217,18 +267,24 @@ func testT2(ctx context.Context) { testOpenAI(ctx, DefaultOpenAIModel, "what is 2+2+8, use the provider adder tool", tools) } +func testT3(ctx context.Context) { + testOpenAIComp(ctx, "gpt-4o", "what is 2+2? 
please be brief", nil) +} + func printUsage() { - fmt.Println("Usage: go run main-testai.go [--anthropic] [--tools] [--model ] [message]") + fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp] [--tools] [--model ] [message]") fmt.Println("Examples:") fmt.Println(" go run main-testai.go 'What is 2+2?'") fmt.Println(" go run main-testai.go --model o4-mini 'What is 2+2?'") fmt.Println(" go run main-testai.go --anthropic 'What is 2+2?'") fmt.Println(" go run main-testai.go --anthropic --model claude-3-5-sonnet-20241022 'What is 2+2?'") + fmt.Println(" go run main-testai.go --openaicomp --model gpt-4o 'What is 2+2?'") fmt.Println(" go run main-testai.go --tools 'Help me configure GitHub Actions monitoring'") fmt.Println("") fmt.Println("Default models:") fmt.Printf(" OpenAI: %s\n", DefaultOpenAIModel) fmt.Printf(" Anthropic: %s\n", DefaultAnthropicModel) + fmt.Printf(" OpenAI Completions: gpt-4o\n") fmt.Println("") fmt.Println("Environment variables:") fmt.Println(" OPENAI_APIKEY (for OpenAI models)") @@ -236,14 +292,16 @@ func printUsage() { } func main() { - var anthropic, tools, help, t1, t2 bool + var anthropic, openaicomp, tools, help, t1, t2, t3 bool var model string flag.BoolVar(&anthropic, "anthropic", false, "Use Anthropic API instead of OpenAI") + flag.BoolVar(&openaicomp, "openaicomp", false, "Use OpenAI Completions API") flag.BoolVar(&tools, "tools", false, "Enable GitHub Actions Monitor tools for testing") flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic)", DefaultOpenAIModel, DefaultAnthropicModel)) flag.BoolVar(&help, "help", false, "Show usage information") flag.BoolVar(&t1, "t1", false, fmt.Sprintf("Run preset T1 test (%s with 'what is 2+2')", DefaultAnthropicModel)) flag.BoolVar(&t2, "t2", false, fmt.Sprintf("Run preset T2 test (%s with 'what is 2+2')", DefaultOpenAIModel)) + flag.BoolVar(&t3, "t3", false, "Run preset T3 test (OpenAI Completions API with gpt-4o)") flag.Parse() if help { @@ -262,11 +320,17 @@ func main() { testT2(ctx) return } + if t3 { + testT3(ctx) + return + } // Set default model based on API type if not provided if model == "" { if anthropic { model = DefaultAnthropicModel + } else if openaicomp { + model = "gpt-4o" } else { model = DefaultOpenAIModel } @@ -285,6 +349,8 @@ func main() { if anthropic { testAnthropic(ctx, model, message, toolDefs) + } else if openaicomp { + testOpenAIComp(ctx, model, message, toolDefs) } else { testOpenAI(ctx, model, message, toolDefs) } diff --git a/pkg/aiusechat/aiutil/aiutil.go b/pkg/aiusechat/aiutil/aiutil.go index fb9f8bb517..aa66529fab 100644 --- a/pkg/aiusechat/aiutil/aiutil.go +++ b/pkg/aiusechat/aiutil/aiutil.go @@ -180,3 +180,12 @@ func JsonEncodeRequestBody(reqBody any) (bytes.Buffer, error) { } return buf, nil } + +func IsOpenAIReasoningModel(model string) bool { + m := strings.ToLower(model) + return strings.HasPrefix(m, "o1") || + strings.HasPrefix(m, "o3") || + strings.HasPrefix(m, "o4") || + strings.HasPrefix(m, "gpt-5") || + strings.HasPrefix(m, "gpt-5.1") +} diff --git a/pkg/aiusechat/openaicomp/openaicomp-backend.go b/pkg/aiusechat/openaicomp/openaicomp-backend.go new file mode 100644 index 0000000000..7ef4823ae6 --- /dev/null +++ b/pkg/aiusechat/openaicomp/openaicomp-backend.go @@ -0,0 +1,193 @@ +// Copyright 2025, Command Line Inc. 
+// SPDX-License-Identifier: Apache-2.0
+
+package openaicomp
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+	"strings"
+	"time"
+
+	"github.com/google/uuid"
+	"github.com/launchdarkly/eventsource"
+	"github.com/wavetermdev/waveterm/pkg/aiusechat/chatstore"
+	"github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes"
+	"github.com/wavetermdev/waveterm/pkg/web/sse"
+)
+
+// RunCompletionsChatStep executes a chat step using the completions API
+func RunCompletionsChatStep(
+	ctx context.Context,
+	sseHandler *sse.SSEHandlerCh,
+	chatOpts uctypes.WaveChatOpts,
+	cont *uctypes.WaveContinueResponse,
+) (*uctypes.WaveStopReason, []*CompletionsChatMessage, *uctypes.RateLimitInfo, error) {
+	if sseHandler == nil {
+		return nil, nil, nil, errors.New("sse handler is nil")
+	}
+
+	chat := chatstore.DefaultChatStore.Get(chatOpts.ChatId)
+	if chat == nil {
+		return nil, nil, nil, fmt.Errorf("chat not found: %s", chatOpts.ChatId)
+	}
+
+	if chatOpts.Config.TimeoutMs > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, time.Duration(chatOpts.Config.TimeoutMs)*time.Millisecond)
+		defer cancel()
+	}
+
+	// Convert stored messages to completions format. The system prompt is
+	// intentionally not added here: buildCompletionsHTTPRequest prepends
+	// chatOpts.SystemPrompt itself, so adding it here too would send the
+	// model two system messages per request.
+	var messages []CompletionsMessage
+
+	// Convert native messages
+	for _, genMsg := range chat.NativeMessages {
+		compMsg, ok := genMsg.(*CompletionsChatMessage)
+		if !ok {
+			return nil, nil, nil, fmt.Errorf("expected CompletionsChatMessage, got %T", genMsg)
+		}
+		messages = append(messages, compMsg.Message)
+	}
+
+	req, err := buildCompletionsHTTPRequest(ctx, messages, chatOpts)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	client := &http.Client{}
+	resp, err := client.Do(req)
+	if err != nil {
+		return nil, nil, nil, fmt.Errorf("request failed: %w", err)
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode != http.StatusOK {
+		bodyBytes, _ := io.ReadAll(resp.Body)
+		return nil, nil, nil, fmt.Errorf("API returned status %d: %s", resp.StatusCode, string(bodyBytes))
+	}
+
+	// Setup SSE if this is a new request (not a continuation)
+	if cont == nil {
+		if err := sseHandler.SetupSSE(); err != nil {
+			return nil, nil, nil, fmt.Errorf("failed to setup SSE: %w", err)
+		}
+	}
+
+	// Stream processing
+	stopReason, assistantMsg, err := processCompletionsStream(ctx, resp.Body, sseHandler, chatOpts)
+	if err != nil {
+		return nil, nil, nil, err
+	}
+
+	return stopReason, []*CompletionsChatMessage{assistantMsg}, nil, nil
+}
+
+func processCompletionsStream(
+	ctx context.Context,
+	body io.Reader,
+	sseHandler *sse.SSEHandlerCh,
+	chatOpts uctypes.WaveChatOpts,
+) (*uctypes.WaveStopReason, *CompletionsChatMessage, error) {
+	decoder := eventsource.NewDecoder(body)
+	var textBuilder strings.Builder
+	msgID := uuid.New().String()
+	textID := uuid.New().String()
+	var finishReason string
+	textStarted := false
+
+	_ = sseHandler.AiMsgStart(msgID)
+	_ = sseHandler.AiMsgStartStep()
+
+	for {
+		if err := ctx.Err(); err != nil {
+			_ = sseHandler.AiMsgError("request cancelled")
+			return &uctypes.WaveStopReason{
+				Kind:      uctypes.StopKindCanceled,
+				ErrorType: "cancelled",
+				ErrorText: "request cancelled",
+			}, nil, err
+		}
+
+		event, err := decoder.Decode()
+		if err != nil {
+			if errors.Is(err, io.EOF) {
+				break
+			}
+			_ = sseHandler.AiMsgError(err.Error())
+			return &uctypes.WaveStopReason{
+				Kind:      
uctypes.StopKindError, + ErrorType: "stream", + ErrorText: err.Error(), + }, nil, fmt.Errorf("stream decode error: %w", err) + } + + data := event.Data() + if data == "[DONE]" { + break + } + + var chunk StreamChunk + if err := json.Unmarshal([]byte(data), &chunk); err != nil { + log.Printf("openaicomp: failed to parse chunk: %v\n", err) + continue + } + + if len(chunk.Choices) == 0 { + continue + } + + choice := chunk.Choices[0] + if choice.Delta.Content != "" { + if !textStarted { + _ = sseHandler.AiMsgTextStart(textID) + textStarted = true + } + textBuilder.WriteString(choice.Delta.Content) + _ = sseHandler.AiMsgTextDelta(textID, choice.Delta.Content) + } + + if choice.FinishReason != nil && *choice.FinishReason != "" { + finishReason = *choice.FinishReason + } + } + + stopKind := uctypes.StopKindDone + if finishReason == "length" { + stopKind = uctypes.StopKindMaxTokens + } + + stopReason := &uctypes.WaveStopReason{ + Kind: stopKind, + RawReason: finishReason, + } + + assistantMsg := &CompletionsChatMessage{ + MessageId: msgID, + Message: CompletionsMessage{ + Role: "assistant", + Content: textBuilder.String(), + }, + } + + if textStarted { + _ = sseHandler.AiMsgTextEnd(textID) + } + _ = sseHandler.AiMsgFinishStep() + _ = sseHandler.AiMsgFinish(finishReason, nil) + + return stopReason, assistantMsg, nil +} diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go new file mode 100644 index 0000000000..cb4082521b --- /dev/null +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -0,0 +1,147 @@ +// Copyright 2025, Command Line Inc. +// SPDX-License-Identifier: Apache-2.0 + +package openaicomp + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "log" + "net/http" + "strings" + + "github.com/wavetermdev/waveterm/pkg/aiusechat/aiutil" + "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" + "github.com/wavetermdev/waveterm/pkg/wavebase" +) + +const ( + OpenAICompDefaultMaxTokens = 4096 +) + +// buildCompletionsHTTPRequest creates an HTTP request for the OpenAI completions API +func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMessage, chatOpts uctypes.WaveChatOpts) (*http.Request, error) { + opts := chatOpts.Config + + if opts.Model == "" { + return nil, errors.New("opts.model is required") + } + if opts.BaseURL == "" { + return nil, errors.New("BaseURL is required") + } + + maxTokens := opts.MaxTokens + if maxTokens <= 0 { + maxTokens = OpenAICompDefaultMaxTokens + } + + finalMessages := messages + if len(chatOpts.SystemPrompt) > 0 { + systemMessage := CompletionsMessage{ + Role: "system", + Content: strings.Join(chatOpts.SystemPrompt, "\n"), + } + finalMessages = append([]CompletionsMessage{systemMessage}, messages...) 
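+		// The system prompt lives in chatOpts (not in the stored chat), so it is
+		// prepended to every outbound request here; RunCompletionsChatStep passes
+		// only the stored user/assistant messages.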
+ } + + reqBody := &CompletionsRequest{ + Model: opts.Model, + Messages: finalMessages, + Stream: true, + } + + if aiutil.IsOpenAIReasoningModel(opts.Model) { + reqBody.MaxCompletionTokens = maxTokens + } else { + reqBody.MaxTokens = maxTokens + } + + if wavebase.IsDevMode() { + log.Printf("openaicomp: model %s, messages: %d\n", opts.Model, len(messages)) + } + + buf, err := json.Marshal(reqBody) + if err != nil { + return nil, err + } + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, opts.BaseURL, bytes.NewReader(buf)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + if opts.APIToken != "" { + req.Header.Set("Authorization", "Bearer "+opts.APIToken) + } + req.Header.Set("Accept", "text/event-stream") + if chatOpts.ClientId != "" { + req.Header.Set("X-Wave-ClientId", chatOpts.ClientId) + } + if chatOpts.ChatId != "" { + req.Header.Set("X-Wave-ChatId", chatOpts.ChatId) + } + req.Header.Set("X-Wave-Version", wavebase.WaveVersion) + req.Header.Set("X-Wave-APIType", "openai-comp") + req.Header.Set("X-Wave-RequestType", chatOpts.GetWaveRequestType()) + + return req, nil +} + +// ConvertAIMessageToCompletionsMessage converts an AIMessage to CompletionsChatMessage +func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*CompletionsChatMessage, error) { + if err := aiMsg.Validate(); err != nil { + return nil, fmt.Errorf("invalid AIMessage: %w", err) + } + + var textBuilder strings.Builder + for _, part := range aiMsg.Parts { + if part.Type == uctypes.AIMessagePartTypeText { + textBuilder.WriteString(part.Text) + } + } + + return &CompletionsChatMessage{ + MessageId: aiMsg.MessageId, + Message: CompletionsMessage{ + Role: "user", + Content: textBuilder.String(), + }, + }, nil +} + +// ConvertAIChatToUIChat converts stored chat to UI format +func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { + uiChat := &uctypes.UIChat{ + ChatId: aiChat.ChatId, + APIType: aiChat.APIType, + Model: aiChat.Model, + APIVersion: aiChat.APIVersion, + Messages: make([]uctypes.UIMessage, 0, len(aiChat.NativeMessages)), + } + + for _, genMsg := range aiChat.NativeMessages { + compMsg, ok := genMsg.(*CompletionsChatMessage) + if !ok { + continue + } + + uiMsg := uctypes.UIMessage{ + ID: compMsg.MessageId, + Role: compMsg.Message.Role, + Parts: []uctypes.UIMessagePart{ + { + Type: "text", + Text: compMsg.Message.Content, + }, + }, + } + + uiChat.Messages = append(uiChat.Messages, uiMsg) + } + + return uiChat, nil +} diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaicomp/openaicomp-types.go new file mode 100644 index 0000000000..0a2a596243 --- /dev/null +++ b/pkg/aiusechat/openaicomp/openaicomp-types.go @@ -0,0 +1,118 @@ +// Copyright 2025, Command Line Inc. 
+// SPDX-License-Identifier: Apache-2.0 + +package openaicomp + +import ( + "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" +) + +// OpenAI Completions API types (simplified) + +type CompletionsRequest struct { + Model string `json:"model"` + Messages []CompletionsMessage `json:"messages"` + Stream bool `json:"stream"` + MaxTokens int `json:"max_tokens,omitempty"` // legacy + MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` // newer + Temperature float64 `json:"temperature,omitempty"` + Tools []ToolDefinition `json:"tools,omitempty"` // if you use tools + ToolChoice any `json:"tool_choice,omitempty"` // "auto", "none", or struct +} + +type CompletionsMessage struct { + Role string `json:"role"` // "system","user","assistant","tool" + Content string `json:"content,omitempty"` // normal text messages + ToolCalls []ToolCall `json:"tool_calls,omitempty"` // assistant tool-call message + ToolCallID string `json:"tool_call_id,omitempty"` // for role:"tool" + Name string `json:"name,omitempty"` // tool name on role:"tool" +} + +type ToolDefinition struct { + Type string `json:"type"` // "function" + Function ToolFunctionDef `json:"function"` +} + +type ToolFunctionDef struct { + Name string `json:"name"` + Description string `json:"description,omitempty"` + Parameters map[string]any `json:"parameters,omitempty"` // or jsonschema struct +} + +type ToolCall struct { + ID string `json:"id"` + Type string `json:"type"` // "function" + Function ToolFunctionCall `json:"function"` +} + +type ToolFunctionCall struct { + Name string `json:"name"` + Arguments string `json:"arguments"` // raw JSON string +} + +type StreamChunk struct { + ID string `json:"id"` + Object string `json:"object"` + Created int64 `json:"created"` + Model string `json:"model"` + Choices []StreamChoice `json:"choices"` +} + +type StreamChoice struct { + Index int `json:"index"` + Delta ContentDelta `json:"delta"` + FinishReason *string `json:"finish_reason"` +} + +// This is the important part: +type ContentDelta struct { + Role string `json:"role,omitempty"` + Content string `json:"content,omitempty"` + ToolCalls []ToolCallDelta `json:"tool_calls,omitempty"` +} + +type ToolCallDelta struct { + Index int `json:"index"` + ID string `json:"id,omitempty"` // only on first chunk + Type string `json:"type,omitempty"` // "function" + Function *ToolFunctionDelta `json:"function,omitempty"` +} + +type ToolFunctionDelta struct { + Name string `json:"name,omitempty"` // only on first chunk + Arguments string `json:"arguments,omitempty"` // streamed, append across chunks +} + +// CompletionsChatMessage is the stored message type +type CompletionsChatMessage struct { + MessageId string `json:"messageid"` + Message CompletionsMessage `json:"message"` + Usage *CompletionsUsage `json:"usage,omitempty"` +} + +type CompletionsUsage struct { + Model string `json:"model,omitempty"` + PromptTokens int `json:"prompt_tokens,omitempty"` + CompletionTokens int `json:"completion_tokens,omitempty"` + TotalTokens int `json:"total_tokens,omitempty"` +} + +func (m *CompletionsChatMessage) GetMessageId() string { + return m.MessageId +} + +func (m *CompletionsChatMessage) GetRole() string { + return m.Message.Role +} + +func (m *CompletionsChatMessage) GetUsage() *uctypes.AIUsage { + if m.Usage == nil { + return nil + } + return &uctypes.AIUsage{ + APIType: "openai-comp", + Model: m.Usage.Model, + InputTokens: m.Usage.PromptTokens, + OutputTokens: m.Usage.CompletionTokens, + } +} diff --git a/pkg/aiusechat/usechat-backend.go 
b/pkg/aiusechat/usechat-backend.go index adebb11282..884a94013c 100644 --- a/pkg/aiusechat/usechat-backend.go +++ b/pkg/aiusechat/usechat-backend.go @@ -9,6 +9,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/aiusechat/anthropic" "github.com/wavetermdev/waveterm/pkg/aiusechat/openai" + "github.com/wavetermdev/waveterm/pkg/aiusechat/openaicomp" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/web/sse" ) @@ -51,6 +52,7 @@ type UseChatBackend interface { // Compile-time interface checks var _ UseChatBackend = (*openaiResponsesBackend)(nil) +var _ UseChatBackend = (*openaiCompletionsBackend)(nil) var _ UseChatBackend = (*anthropicBackend)(nil) // GetBackendByAPIType returns the appropriate UseChatBackend implementation for the given API type @@ -58,6 +60,8 @@ func GetBackendByAPIType(apiType string) (UseChatBackend, error) { switch apiType { case APIType_OpenAI: return &openaiResponsesBackend{}, nil + case APIType_OpenAIComp: + return &openaiCompletionsBackend{}, nil case APIType_Anthropic: return &anthropicBackend{}, nil default: @@ -119,6 +123,43 @@ func (b *openaiResponsesBackend) ConvertAIChatToUIChat(aiChat uctypes.AIChat) (* return openai.ConvertAIChatToUIChat(aiChat) } +// openaiCompletionsBackend implements UseChatBackend for OpenAI Completions API +type openaiCompletionsBackend struct{} + +func (b *openaiCompletionsBackend) RunChatStep( + ctx context.Context, + sseHandler *sse.SSEHandlerCh, + chatOpts uctypes.WaveChatOpts, + cont *uctypes.WaveContinueResponse, +) (*uctypes.WaveStopReason, []uctypes.GenAIMessage, *uctypes.RateLimitInfo, error) { + stopReason, msgs, rateLimitInfo, err := openaicomp.RunCompletionsChatStep(ctx, sseHandler, chatOpts, cont) + var genMsgs []uctypes.GenAIMessage + for _, msg := range msgs { + genMsgs = append(genMsgs, msg) + } + return stopReason, genMsgs, rateLimitInfo, err +} + +func (b *openaiCompletionsBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData *uctypes.UIMessageDataToolUse) error { + return fmt.Errorf("tools not supported in openai-comp backend") +} + +func (b *openaiCompletionsBackend) ConvertToolResultsToNativeChatMessage(toolResults []uctypes.AIToolResult) ([]uctypes.GenAIMessage, error) { + return nil, fmt.Errorf("tools not supported in openai-comp backend") +} + +func (b *openaiCompletionsBackend) ConvertAIMessageToNativeChatMessage(message uctypes.AIMessage) (uctypes.GenAIMessage, error) { + return openaicomp.ConvertAIMessageToCompletionsMessage(message) +} + +func (b *openaiCompletionsBackend) GetFunctionCallInputByToolCallId(aiChat uctypes.AIChat, toolCallId string) *uctypes.AIFunctionCallInput { + return nil +} + +func (b *openaiCompletionsBackend) ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { + return openaicomp.ConvertAIChatToUIChat(aiChat) +} + // anthropicBackend implements UseChatBackend for Anthropic API type anthropicBackend struct{} diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index e5866bcaf4..f57a2c622b 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -33,8 +33,9 @@ import ( ) const ( - APIType_Anthropic = "anthropic" - APIType_OpenAI = "openai" + APIType_Anthropic = "anthropic" + APIType_OpenAI = "openai" + APIType_OpenAIComp = "openai-comp" ) const DefaultAPI = APIType_OpenAI @@ -389,8 +390,8 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha stepNum := chatstore.DefaultChatStore.CountUserMessages(chatOpts.ChatId) metrics := &uctypes.AIMetrics{ - ChatId: 
chatOpts.ChatId, - StepNum: stepNum, + ChatId: chatOpts.ChatId, + StepNum: stepNum, Usage: uctypes.AIUsage{ APIType: chatOpts.Config.APIType, Model: chatOpts.Config.Model, From 0e4a872983537249038a669e211aaf11ebbe3c3d Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 15:07:36 -0800 Subject: [PATCH 02/31] test with mistral on openrouter --- cmd/testai/main-testai.go | 70 ++++++++++++++++++++++++++++++++++++--- 1 file changed, 65 insertions(+), 5 deletions(-) diff --git a/cmd/testai/main-testai.go b/cmd/testai/main-testai.go index a6f1a98ff8..226c038ebd 100644 --- a/cmd/testai/main-testai.go +++ b/cmd/testai/main-testai.go @@ -24,8 +24,9 @@ import ( var testSchemaJSON string const ( - DefaultAnthropicModel = "claude-sonnet-4-5" - DefaultOpenAIModel = "gpt-5.1" + DefaultAnthropicModel = "claude-sonnet-4-5" + DefaultOpenAIModel = "gpt-5.1" + DefaultOpenRouterModel = "mistralai/mistral-small-3.2-24b-instruct" ) // TestResponseWriter implements http.ResponseWriter and additional interfaces for testing @@ -205,6 +206,56 @@ func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes. } } +func testOpenRouter(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { + apiKey := os.Getenv("OPENROUTER_APIKEY") + if apiKey == "" { + fmt.Println("Error: OPENROUTER_APIKEY environment variable not set") + os.Exit(1) + } + + opts := &uctypes.AIOptsType{ + APIType: aiusechat.APIType_OpenAIComp, + APIToken: apiKey, + BaseURL: "https://openrouter.ai/api/v1/chat/completions", + Model: model, + MaxTokens: 4096, + ThinkingLevel: uctypes.ThinkingLevelMedium, + } + + chatID := uuid.New().String() + + aiMessage := &uctypes.AIMessage{ + MessageId: uuid.New().String(), + Parts: []uctypes.AIMessagePart{ + { + Type: uctypes.AIMessagePartTypeText, + Text: message, + }, + }, + } + + fmt.Printf("Testing OpenRouter with WaveAIPostMessageWrap, model: %s\n", model) + fmt.Printf("Message: %s\n", message) + fmt.Printf("Chat ID: %s\n", chatID) + fmt.Println("---") + + testWriter := &TestResponseWriter{} + sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx) + defer sseHandler.Close() + + chatOpts := uctypes.WaveChatOpts{ + ChatId: chatID, + ClientId: uuid.New().String(), + Config: *opts, + Tools: tools, + SystemPrompt: []string{"You are a helpful assistant. 
Be concise and clear in your responses."}, + } + err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts) + if err != nil { + fmt.Printf("OpenRouter streaming error: %v\n", err) + } +} + func testAnthropic(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) { apiKey := os.Getenv("ANTHROPIC_APIKEY") if apiKey == "" { @@ -272,32 +323,37 @@ func testT3(ctx context.Context) { } func printUsage() { - fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp] [--tools] [--model ] [message]") + fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp|--openrouter] [--tools] [--model ] [message]") fmt.Println("Examples:") fmt.Println(" go run main-testai.go 'What is 2+2?'") fmt.Println(" go run main-testai.go --model o4-mini 'What is 2+2?'") fmt.Println(" go run main-testai.go --anthropic 'What is 2+2?'") fmt.Println(" go run main-testai.go --anthropic --model claude-3-5-sonnet-20241022 'What is 2+2?'") fmt.Println(" go run main-testai.go --openaicomp --model gpt-4o 'What is 2+2?'") + fmt.Println(" go run main-testai.go --openrouter 'What is 2+2?'") + fmt.Println(" go run main-testai.go --openrouter --model anthropic/claude-3.5-sonnet 'What is 2+2?'") fmt.Println(" go run main-testai.go --tools 'Help me configure GitHub Actions monitoring'") fmt.Println("") fmt.Println("Default models:") fmt.Printf(" OpenAI: %s\n", DefaultOpenAIModel) fmt.Printf(" Anthropic: %s\n", DefaultAnthropicModel) fmt.Printf(" OpenAI Completions: gpt-4o\n") + fmt.Printf(" OpenRouter: %s\n", DefaultOpenRouterModel) fmt.Println("") fmt.Println("Environment variables:") fmt.Println(" OPENAI_APIKEY (for OpenAI models)") fmt.Println(" ANTHROPIC_APIKEY (for Anthropic models)") + fmt.Println(" OPENROUTER_APIKEY (for OpenRouter models)") } func main() { - var anthropic, openaicomp, tools, help, t1, t2, t3 bool + var anthropic, openaicomp, openrouter, tools, help, t1, t2, t3 bool var model string flag.BoolVar(&anthropic, "anthropic", false, "Use Anthropic API instead of OpenAI") flag.BoolVar(&openaicomp, "openaicomp", false, "Use OpenAI Completions API") + flag.BoolVar(&openrouter, "openrouter", false, "Use OpenRouter API") flag.BoolVar(&tools, "tools", false, "Enable GitHub Actions Monitor tools for testing") - flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic)", DefaultOpenAIModel, DefaultAnthropicModel)) + flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic, %s for OpenRouter)", DefaultOpenAIModel, DefaultAnthropicModel, DefaultOpenRouterModel)) flag.BoolVar(&help, "help", false, "Show usage information") flag.BoolVar(&t1, "t1", false, fmt.Sprintf("Run preset T1 test (%s with 'what is 2+2')", DefaultAnthropicModel)) flag.BoolVar(&t2, "t2", false, fmt.Sprintf("Run preset T2 test (%s with 'what is 2+2')", DefaultOpenAIModel)) @@ -331,6 +387,8 @@ func main() { model = DefaultAnthropicModel } else if openaicomp { model = "gpt-4o" + } else if openrouter { + model = DefaultOpenRouterModel } else { model = DefaultOpenAIModel } @@ -351,6 +409,8 @@ func main() { testAnthropic(ctx, model, message, toolDefs) } else if openaicomp { testOpenAIComp(ctx, model, message, toolDefs) + } else if openrouter { + testOpenRouter(ctx, model, message, toolDefs) } else { testOpenAI(ctx, model, message, toolDefs) } From 2df1efcee356ad1f6372d66826780f8e319d4453 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 15:19:51 -0800 Subject: [PATCH 03/31] text file parts and add 
tab state and platforminfo... --- .../openaicomp/openaicomp-convertmessage.go | 53 ++++++++++++++++++- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index cb4082521b..c697a4cd4b 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -22,6 +22,16 @@ const ( OpenAICompDefaultMaxTokens = 4096 ) +// appendToLastUserMessage appends text to the last user message in the messages slice +func appendToLastUserMessage(messages []CompletionsMessage, text string) { + for i := len(messages) - 1; i >= 0; i-- { + if messages[i].Role == "user" { + messages[i].Content += "\n\n" + text + break + } + } +} + // buildCompletionsHTTPRequest creates an HTTP request for the OpenAI completions API func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMessage, chatOpts uctypes.WaveChatOpts) (*http.Request, error) { opts := chatOpts.Config @@ -47,6 +57,14 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess finalMessages = append([]CompletionsMessage{systemMessage}, messages...) } + // injected data + if chatOpts.TabState != "" { + appendToLastUserMessage(finalMessages, chatOpts.TabState) + } + if chatOpts.PlatformInfo != "" { + appendToLastUserMessage(finalMessages, "\n"+chatOpts.PlatformInfo+"\n") + } + reqBody := &CompletionsRequest{ Model: opts.Model, Messages: finalMessages, @@ -92,15 +110,46 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess } // ConvertAIMessageToCompletionsMessage converts an AIMessage to CompletionsChatMessage +// These messages are ALWAYS role "user" func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*CompletionsChatMessage, error) { if err := aiMsg.Validate(); err != nil { return nil, fmt.Errorf("invalid AIMessage: %w", err) } var textBuilder strings.Builder + firstText := true for _, part := range aiMsg.Parts { - if part.Type == uctypes.AIMessagePartTypeText { - textBuilder.WriteString(part.Text) + var partText string + + switch { + case part.Type == uctypes.AIMessagePartTypeText: + partText = part.Text + + case part.MimeType == "text/plain": + textData, err := aiutil.ExtractTextData(part.Data, part.URL) + if err != nil { + log.Printf("openaicomp: error extracting text data for %s: %v\n", part.FileName, err) + continue + } + partText = aiutil.FormatAttachedTextFile(part.FileName, textData) + + case part.MimeType == "directory": + if len(part.Data) == 0 { + log.Printf("openaicomp: directory listing part missing data for %s\n", part.FileName) + continue + } + partText = aiutil.FormatAttachedDirectoryListing(part.FileName, string(part.Data)) + + default: + continue + } + + if partText != "" { + if !firstText { + textBuilder.WriteString("\n\n") + } + textBuilder.WriteString(partText) + firstText = false } } From 56e19fdbae28f1efffc951a30b1d17ae9d9f120e Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 15:29:50 -0800 Subject: [PATCH 04/31] move thinking mode to static initialization --- pkg/aiusechat/usechat-thinkingmode.go | 67 +++++++++++++++++++++++++++ pkg/aiusechat/usechat.go | 53 ++++++--------------- 2 files changed, 81 insertions(+), 39 deletions(-) create mode 100644 pkg/aiusechat/usechat-thinkingmode.go diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go new file mode 100644 index 0000000000..8e839b4872 --- /dev/null +++ 
b/pkg/aiusechat/usechat-thinkingmode.go @@ -0,0 +1,67 @@ +// Copyright 2025, Command Line Inc. +// SPDX-License-Identifier: Apache-2.0 + +package aiusechat + +import ( + "fmt" + + "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" +) + +type ThinkingModeConfig struct { + Mode string `json:"mode"` + DisplayName string `json:"displayname"` + APIType string `json:"apitype"` + Model string `json:"model"` + ThinkingLevel string `json:"thinkinglevel"` + BaseURL string `json:"baseurl,omitempty"` + APIVersion string `json:"apiversion,omitempty"` + APIToken string `json:"apitoken,omitempty"` + Premium bool `json:"premium"` + Icon string `json:"icon"` + Description string `json:"description"` +} + +var thinkingModeConfigs = map[string]ThinkingModeConfig{ + uctypes.ThinkingModeQuick: { + Mode: uctypes.ThinkingModeQuick, + DisplayName: "Quick", + APIType: APIType_OpenAI, + Model: uctypes.DefaultOpenAIModel, + ThinkingLevel: uctypes.ThinkingLevelLow, + Premium: false, + Icon: "fa-bolt", + Description: "Fastest responses (gpt-5-mini)", + }, + uctypes.ThinkingModeBalanced: { + Mode: uctypes.ThinkingModeBalanced, + DisplayName: "Balanced", + APIType: APIType_OpenAI, + Model: uctypes.PremiumOpenAIModel, + ThinkingLevel: uctypes.ThinkingLevelLow, + Premium: true, + Icon: "fa-sparkles", + Description: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", + }, + uctypes.ThinkingModeDeep: { + Mode: uctypes.ThinkingModeDeep, + DisplayName: "Deep", + APIType: APIType_OpenAI, + Model: uctypes.PremiumOpenAIModel, + ThinkingLevel: uctypes.ThinkingLevelMedium, + Premium: true, + Icon: "fa-lightbulb", + Description: "Slower but most capable\n(gpt-5.1 with full reasoning)", + }, +} + +func getThinkingModeConfig(thinkingMode string) (*ThinkingModeConfig, error) { + config, ok := thinkingModeConfigs[thinkingMode] + if !ok { + return nil, fmt.Errorf("invalid thinking mode: %s", thinkingMode) + } + + configCopy := config + return &configCopy, nil +} \ No newline at end of file diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index f57a2c622b..7bf8da3845 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -117,45 +117,20 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo } else { thinkingMode = uctypes.ThinkingModeQuick } - if DefaultAPI == APIType_Anthropic { - thinkingLevel := uctypes.ThinkingLevelMedium - return &uctypes.AIOptsType{ - APIType: APIType_Anthropic, - Model: uctypes.DefaultAnthropicModel, - MaxTokens: maxTokens, - ThinkingLevel: thinkingLevel, - ThinkingMode: thinkingMode, - BaseURL: baseUrl, - }, nil - } else if DefaultAPI == APIType_OpenAI { - var model string - var thinkingLevel string - - switch thinkingMode { - case uctypes.ThinkingModeQuick: - model = uctypes.DefaultOpenAIModel - thinkingLevel = uctypes.ThinkingLevelLow - case uctypes.ThinkingModeBalanced: - model = uctypes.PremiumOpenAIModel - thinkingLevel = uctypes.ThinkingLevelLow - case uctypes.ThinkingModeDeep: - model = uctypes.PremiumOpenAIModel - thinkingLevel = uctypes.ThinkingLevelMedium - default: - model = uctypes.PremiumOpenAIModel - thinkingLevel = uctypes.ThinkingLevelLow - } - - return &uctypes.AIOptsType{ - APIType: APIType_OpenAI, - Model: model, - MaxTokens: maxTokens, - ThinkingLevel: thinkingLevel, - ThinkingMode: thinkingMode, - BaseURL: baseUrl, - }, nil - } - return nil, fmt.Errorf("invalid API type: %s", DefaultAPI) + + config, err := getThinkingModeConfig(thinkingMode) + if err != nil { + return nil, err + } + + return &uctypes.AIOptsType{ 
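+		// Model, API type, and thinking level come from the static mode config;
+		// the token budget and endpoint are still computed from runtime settings above.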
+ APIType: config.APIType, + Model: config.Model, + MaxTokens: maxTokens, + ThinkingLevel: config.ThinkingLevel, + ThinkingMode: thinkingMode, + BaseURL: baseUrl, + }, nil } func shouldUseChatCompletionsAPI(model string) bool { From bf7324e29f9f75a60b8b5aeba941de422359741b Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 15:42:09 -0800 Subject: [PATCH 05/31] backend usechat thinking modes --- frontend/app/store/wshclientapi.ts | 5 +++++ frontend/types/gotypes.d.ts | 15 +++++++++++++++ pkg/aiusechat/uctypes/usechat-types.go | 14 ++++++++++++++ pkg/aiusechat/usechat-thinkingmode.go | 26 ++++++++++---------------- pkg/wshrpc/wshclient/wshclient.go | 6 ++++++ pkg/wshrpc/wshrpctypes.go | 2 ++ pkg/wshrpc/wshserver/wshserver.go | 6 ++++-- 7 files changed, 56 insertions(+), 18 deletions(-) diff --git a/frontend/app/store/wshclientapi.ts b/frontend/app/store/wshclientapi.ts index 0715eae699..5d6387e3b2 100644 --- a/frontend/app/store/wshclientapi.ts +++ b/frontend/app/store/wshclientapi.ts @@ -612,6 +612,11 @@ class RpcApiType { return client.wshRpcCall("waveaienabletelemetry", null, opts); } + // command "waveaigetmodes" [call] + WaveAIGetModesCommand(client: WshClient, opts?: RpcOpts): Promise { + return client.wshRpcCall("waveaigetmodes", null, opts); + } + // command "waveaigettooldiff" [call] WaveAIGetToolDiffCommand(client: WshClient, data: CommandWaveAIGetToolDiffData, opts?: RpcOpts): Promise { return client.wshRpcCall("waveaigettooldiff", data, opts); diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 8b80fe62af..14bcc83c2f 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -1309,6 +1309,21 @@ declare global { cursor: string; }; + // uctypes.ThinkingModeConfig + type ThinkingModeConfig = { + mode: string; + displayname: string; + apitype: string; + model: string; + thinkinglevel: string; + baseurl?: string; + apiversion?: string; + apitoken?: string; + premium: boolean; + icon: string; + description: string; + }; + // wshrpc.TimeSeriesData type TimeSeriesData = { ts: number; diff --git a/pkg/aiusechat/uctypes/usechat-types.go b/pkg/aiusechat/uctypes/usechat-types.go index 4154ceacf0..90229bf90f 100644 --- a/pkg/aiusechat/uctypes/usechat-types.go +++ b/pkg/aiusechat/uctypes/usechat-types.go @@ -144,6 +144,20 @@ const ( ApprovalAutoApproved = "auto-approved" ) +type ThinkingModeConfig struct { + Mode string `json:"mode"` + DisplayName string `json:"displayname"` + APIType string `json:"apitype"` + Model string `json:"model"` + ThinkingLevel string `json:"thinkinglevel"` + BaseURL string `json:"baseurl,omitempty"` + APIVersion string `json:"apiversion,omitempty"` + APIToken string `json:"apitoken,omitempty"` + Premium bool `json:"premium"` + Icon string `json:"icon"` + Description string `json:"description"` +} + // when updating this struct, also modify frontend/app/aipanel/aitypes.ts WaveUIDataTypes.tooluse type UIMessageDataToolUse struct { ToolCallId string `json:"toolcallid"` diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index 8e839b4872..7e2c48ca49 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -9,21 +9,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" ) -type ThinkingModeConfig struct { - Mode string `json:"mode"` - DisplayName string `json:"displayname"` - APIType string `json:"apitype"` - Model string `json:"model"` - ThinkingLevel string `json:"thinkinglevel"` - BaseURL string `json:"baseurl,omitempty"` - 
APIVersion string `json:"apiversion,omitempty"` - APIToken string `json:"apitoken,omitempty"` - Premium bool `json:"premium"` - Icon string `json:"icon"` - Description string `json:"description"` -} - -var thinkingModeConfigs = map[string]ThinkingModeConfig{ +var thinkingModeConfigs = map[string]uctypes.ThinkingModeConfig{ uctypes.ThinkingModeQuick: { Mode: uctypes.ThinkingModeQuick, DisplayName: "Quick", @@ -56,7 +42,7 @@ var thinkingModeConfigs = map[string]ThinkingModeConfig{ }, } -func getThinkingModeConfig(thinkingMode string) (*ThinkingModeConfig, error) { +func getThinkingModeConfig(thinkingMode string) (*uctypes.ThinkingModeConfig, error) { config, ok := thinkingModeConfigs[thinkingMode] if !ok { return nil, fmt.Errorf("invalid thinking mode: %s", thinkingMode) @@ -64,4 +50,12 @@ func getThinkingModeConfig(thinkingMode string) (*ThinkingModeConfig, error) { configCopy := config return &configCopy, nil +} + +func WaveAIGetModes() ([]uctypes.ThinkingModeConfig, error) { + modes := make([]uctypes.ThinkingModeConfig, 0, len(thinkingModeConfigs)) + for _, config := range thinkingModeConfigs { + modes = append(modes, config) + } + return modes, nil } \ No newline at end of file diff --git a/pkg/wshrpc/wshclient/wshclient.go b/pkg/wshrpc/wshclient/wshclient.go index 10f2d17540..2290180f2c 100644 --- a/pkg/wshrpc/wshclient/wshclient.go +++ b/pkg/wshrpc/wshclient/wshclient.go @@ -731,6 +731,12 @@ func WaveAIEnableTelemetryCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) error return err } +// command "waveaigetmodes", wshserver.WaveAIGetModesCommand +func WaveAIGetModesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]uctypes.ThinkingModeConfig, error) { + resp, err := sendRpcRequestCallHelper[[]uctypes.ThinkingModeConfig](w, "waveaigetmodes", nil, opts) + return resp, err +} + // command "waveaigettooldiff", wshserver.WaveAIGetToolDiffCommand func WaveAIGetToolDiffCommand(w *wshutil.WshRpc, data wshrpc.CommandWaveAIGetToolDiffData, opts *wshrpc.RpcOpts) (*wshrpc.CommandWaveAIGetToolDiffRtnData, error) { resp, err := sendRpcRequestCallHelper[*wshrpc.CommandWaveAIGetToolDiffRtnData](w, "waveaigettooldiff", data, opts) diff --git a/pkg/wshrpc/wshrpctypes.go b/pkg/wshrpc/wshrpctypes.go index 0ce53d257f..cd7f62de3e 100644 --- a/pkg/wshrpc/wshrpctypes.go +++ b/pkg/wshrpc/wshrpctypes.go @@ -148,6 +148,7 @@ const ( Command_WaveAIToolApprove = "waveaitoolapprove" Command_WaveAIAddContext = "waveaiaddcontext" Command_WaveAIGetToolDiff = "waveaigettooldiff" + Command_WaveAIGetModes = "waveaigetmodes" Command_CaptureBlockScreenshot = "captureblockscreenshot" @@ -320,6 +321,7 @@ type WshRpcInterface interface { WaveAIToolApproveCommand(ctx context.Context, data CommandWaveAIToolApproveData) error WaveAIAddContextCommand(ctx context.Context, data CommandWaveAIAddContextData) error WaveAIGetToolDiffCommand(ctx context.Context, data CommandWaveAIGetToolDiffData) (*CommandWaveAIGetToolDiffRtnData, error) + WaveAIGetModesCommand(ctx context.Context) ([]uctypes.ThinkingModeConfig, error) // screenshot CaptureBlockScreenshotCommand(ctx context.Context, data CommandCaptureBlockScreenshotData) (string, error) diff --git a/pkg/wshrpc/wshserver/wshserver.go b/pkg/wshrpc/wshserver/wshserver.go index 131ac51e60..a5f10b9604 100644 --- a/pkg/wshrpc/wshserver/wshserver.go +++ b/pkg/wshrpc/wshserver/wshserver.go @@ -580,12 +580,10 @@ func (ws *WshServer) EventReadHistoryCommand(ctx context.Context, data wshrpc.Co } func (ws *WshServer) SetConfigCommand(ctx context.Context, data wshrpc.MetaSettingsType) error { - 
log.Printf("SETCONFIG: %v\n", data) return wconfig.SetBaseConfigValue(data.MetaMapType) } func (ws *WshServer) SetConnectionsConfigCommand(ctx context.Context, data wshrpc.ConnConfigRequest) error { - log.Printf("SET CONNECTIONS CONFIG: %v\n", data) return wconfig.SetConnectionsConfigValue(data.Host, data.MetaMapType) } @@ -1264,6 +1262,10 @@ func (ws *WshServer) WaveAIGetToolDiffCommand(ctx context.Context, data wshrpc.C }, nil } +func (ws *WshServer) WaveAIGetModesCommand(ctx context.Context) ([]uctypes.ThinkingModeConfig, error) { + return aiusechat.WaveAIGetModes() +} + var wshActivityRe = regexp.MustCompile(`^[a-z:#]+$`) func (ws *WshServer) WshActivityCommand(ctx context.Context, data map[string]int) error { From c81e0c607c9aa0673364866f7aa8a5adcdb59a20 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 16:02:12 -0800 Subject: [PATCH 06/31] pull thinkingmodes from the backend --- frontend/app/aipanel/thinkingmode.tsx | 84 +++++++++++--------------- frontend/app/aipanel/waveai-model.tsx | 13 ++++ frontend/app/store/wshclientapi.ts | 2 +- frontend/types/gotypes.d.ts | 30 ++++----- pkg/aiusechat/uctypes/usechat-types.go | 2 +- pkg/aiusechat/usechat-thinkingmode.go | 10 +-- pkg/wshrpc/wshclient/wshclient.go | 4 +- pkg/wshrpc/wshrpctypes.go | 2 +- pkg/wshrpc/wshserver/wshserver.go | 2 +- 9 files changed, 74 insertions(+), 75 deletions(-) diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx index 1e0fb76be7..a941be1641 100644 --- a/frontend/app/aipanel/thinkingmode.tsx +++ b/frontend/app/aipanel/thinkingmode.tsx @@ -9,37 +9,10 @@ import { WaveAIModel } from "./waveai-model"; type ThinkingMode = "quick" | "balanced" | "deep"; -interface ThinkingModeMetadata { - icon: string; - name: string; - desc: string; - premium: boolean; -} - -const ThinkingModeData: Record = { - quick: { - icon: "fa-bolt", - name: "Quick", - desc: "Fastest responses (gpt-5-mini)", - premium: false, - }, - balanced: { - icon: "fa-sparkles", - name: "Balanced", - desc: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", - premium: true, - }, - deep: { - icon: "fa-lightbulb", - name: "Deep", - desc: "Slower but most capable\n(gpt-5.1 with full reasoning)", - premium: true, - }, -}; - export const ThinkingLevelDropdown = memo(() => { const model = WaveAIModel.getInstance(); const thinkingMode = useAtomValue(model.thinkingMode); + const thinkingModeConfigs = useAtomValue(model.thinkingModeConfigs); const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom); const [isOpen, setIsOpen] = useState(false); const dropdownRef = useRef(null); @@ -47,24 +20,38 @@ export const ThinkingLevelDropdown = memo(() => { const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; const hideQuick = model.inBuilder && hasPremium; - const handleSelect = (mode: ThinkingMode) => { - const metadata = ThinkingModeData[mode]; - if (!hasPremium && metadata.premium) { + const configsMap = thinkingModeConfigs.reduce((acc, config) => { + acc[config.mode] = config; + return acc; + }, {} as Record); + + const handleSelect = (mode: string) => { + const config = configsMap[mode]; + if (!config) return; + if (!hasPremium && config.premium) { return; } model.setThinkingMode(mode); setIsOpen(false); }; - let currentMode = (thinkingMode as ThinkingMode) || "balanced"; - const currentMetadata = ThinkingModeData[currentMode]; - if (!hasPremium && currentMetadata.premium) { + let currentMode = thinkingMode || "balanced"; + const currentConfig = configsMap[currentMode]; 
+    if (!currentConfig) {
+        return null;
+    }
+    if (!hasPremium && currentConfig.premium) {
         currentMode = "quick";
     }
     if (hideQuick && currentMode === "quick") {
         currentMode = "balanced";
     }
 
+    const displayConfig = configsMap[currentMode];
+    if (!displayConfig) {
+        return null;
+    }
+
     return (
         <div ref={dropdownRef}>
@@ -86,18 +73,17 @@ export const ThinkingLevelDropdown = memo(() => {
                 <>
                     <div onClick={() => setIsOpen(false)} />
                     <div>
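+                        {/* one row per backend-provided thinking mode */}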
- {(Object.keys(ThinkingModeData) as ThinkingMode[]) - .filter((mode) => !(hideQuick && mode === "quick")) - .map((mode, index, filteredModes) => { - const metadata = ThinkingModeData[mode]; + {thinkingModeConfigs + .filter((config) => !(hideQuick && config.mode === "quick")) + .map((config, index, filteredConfigs) => { const isFirst = index === 0; - const isLast = index === filteredModes.length - 1; - const isDisabled = !hasPremium && metadata.premium; - const isSelected = currentMode === mode; + const isLast = index === filteredConfigs.length - 1; + const isDisabled = !hasPremium && config.premium; + const isSelected = currentMode === config.mode; return ( ); diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx index 7af0914e88..0d7c9c873e 100644 --- a/frontend/app/aipanel/waveai-model.tsx +++ b/frontend/app/aipanel/waveai-model.tsx @@ -58,6 +58,7 @@ export class WaveAIModel { droppedFiles: jotai.PrimitiveAtom = jotai.atom([]); chatId!: jotai.PrimitiveAtom; thinkingMode: jotai.PrimitiveAtom = jotai.atom("balanced"); + thinkingModeConfigs: jotai.PrimitiveAtom = jotai.atom([]); errorMessage: jotai.PrimitiveAtom = jotai.atom(null) as jotai.PrimitiveAtom; modelAtom!: jotai.Atom; containerWidth: jotai.PrimitiveAtom = jotai.atom(0); @@ -445,6 +446,7 @@ export class WaveAIModel { async uiLoadInitialChat() { globalStore.set(this.isLoadingChatAtom, true); + await this.loadThinkingModeConfigs(); const messages = await this.loadInitialChat(); this.useChatSetMessages?.(messages); globalStore.set(this.isLoadingChatAtom, false); @@ -453,6 +455,17 @@ export class WaveAIModel { }, 100); } + async loadThinkingModeConfigs() { + try { + const configs = await RpcApi.WaveAIGetModesCommand(TabRpcClient); + if (configs != null && configs.length > 0) { + globalStore.set(this.thinkingModeConfigs, configs); + } + } catch (error) { + console.error("Failed to load thinking mode configs:", error); + } + } + async ensureRateLimitSet() { const currentInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom); if (currentInfo != null) { diff --git a/frontend/app/store/wshclientapi.ts b/frontend/app/store/wshclientapi.ts index 5d6387e3b2..84e47ad0aa 100644 --- a/frontend/app/store/wshclientapi.ts +++ b/frontend/app/store/wshclientapi.ts @@ -613,7 +613,7 @@ class RpcApiType { } // command "waveaigetmodes" [call] - WaveAIGetModesCommand(client: WshClient, opts?: RpcOpts): Promise { + WaveAIGetModesCommand(client: WshClient, opts?: RpcOpts): Promise { return client.wshRpcCall("waveaigetmodes", null, opts); } diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 14bcc83c2f..884a0883ca 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -13,6 +13,21 @@ declare global { data64: string; }; + // uctypes.AIThinkingModeConfig + type AIThinkingModeConfig = { + mode: string; + displayname: string; + apitype: string; + model: string; + thinkinglevel: string; + baseurl?: string; + apiversion?: string; + apitoken?: string; + premium: boolean; + icon: string; + description: string; + }; + // wshrpc.ActivityDisplayType type ActivityDisplayType = { width: number; @@ -1309,21 +1324,6 @@ declare global { cursor: string; }; - // uctypes.ThinkingModeConfig - type ThinkingModeConfig = { - mode: string; - displayname: string; - apitype: string; - model: string; - thinkinglevel: string; - baseurl?: string; - apiversion?: string; - apitoken?: string; - premium: boolean; - icon: string; - description: string; - }; - // wshrpc.TimeSeriesData type TimeSeriesData = { 
ts: number; diff --git a/pkg/aiusechat/uctypes/usechat-types.go b/pkg/aiusechat/uctypes/usechat-types.go index 90229bf90f..fd905ca4cd 100644 --- a/pkg/aiusechat/uctypes/usechat-types.go +++ b/pkg/aiusechat/uctypes/usechat-types.go @@ -144,7 +144,7 @@ const ( ApprovalAutoApproved = "auto-approved" ) -type ThinkingModeConfig struct { +type AIThinkingModeConfig struct { Mode string `json:"mode"` DisplayName string `json:"displayname"` APIType string `json:"apitype"` diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index 7e2c48ca49..f67a0eab3a 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -9,7 +9,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" ) -var thinkingModeConfigs = map[string]uctypes.ThinkingModeConfig{ +var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ uctypes.ThinkingModeQuick: { Mode: uctypes.ThinkingModeQuick, DisplayName: "Quick", @@ -42,7 +42,7 @@ var thinkingModeConfigs = map[string]uctypes.ThinkingModeConfig{ }, } -func getThinkingModeConfig(thinkingMode string) (*uctypes.ThinkingModeConfig, error) { +func getThinkingModeConfig(thinkingMode string) (*uctypes.AIThinkingModeConfig, error) { config, ok := thinkingModeConfigs[thinkingMode] if !ok { return nil, fmt.Errorf("invalid thinking mode: %s", thinkingMode) @@ -52,10 +52,10 @@ func getThinkingModeConfig(thinkingMode string) (*uctypes.ThinkingModeConfig, er return &configCopy, nil } -func WaveAIGetModes() ([]uctypes.ThinkingModeConfig, error) { - modes := make([]uctypes.ThinkingModeConfig, 0, len(thinkingModeConfigs)) +func WaveAIGetModes() ([]uctypes.AIThinkingModeConfig, error) { + modes := make([]uctypes.AIThinkingModeConfig, 0, len(thinkingModeConfigs)) for _, config := range thinkingModeConfigs { modes = append(modes, config) } return modes, nil -} \ No newline at end of file +} diff --git a/pkg/wshrpc/wshclient/wshclient.go b/pkg/wshrpc/wshclient/wshclient.go index 2290180f2c..35b2be374b 100644 --- a/pkg/wshrpc/wshclient/wshclient.go +++ b/pkg/wshrpc/wshclient/wshclient.go @@ -732,8 +732,8 @@ func WaveAIEnableTelemetryCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) error } // command "waveaigetmodes", wshserver.WaveAIGetModesCommand -func WaveAIGetModesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]uctypes.ThinkingModeConfig, error) { - resp, err := sendRpcRequestCallHelper[[]uctypes.ThinkingModeConfig](w, "waveaigetmodes", nil, opts) +func WaveAIGetModesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]uctypes.AIThinkingModeConfig, error) { + resp, err := sendRpcRequestCallHelper[[]uctypes.AIThinkingModeConfig](w, "waveaigetmodes", nil, opts) return resp, err } diff --git a/pkg/wshrpc/wshrpctypes.go b/pkg/wshrpc/wshrpctypes.go index cd7f62de3e..72c081c484 100644 --- a/pkg/wshrpc/wshrpctypes.go +++ b/pkg/wshrpc/wshrpctypes.go @@ -321,7 +321,7 @@ type WshRpcInterface interface { WaveAIToolApproveCommand(ctx context.Context, data CommandWaveAIToolApproveData) error WaveAIAddContextCommand(ctx context.Context, data CommandWaveAIAddContextData) error WaveAIGetToolDiffCommand(ctx context.Context, data CommandWaveAIGetToolDiffData) (*CommandWaveAIGetToolDiffRtnData, error) - WaveAIGetModesCommand(ctx context.Context) ([]uctypes.ThinkingModeConfig, error) + WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIThinkingModeConfig, error) // screenshot CaptureBlockScreenshotCommand(ctx context.Context, data CommandCaptureBlockScreenshotData) (string, error) diff --git 
a/pkg/wshrpc/wshserver/wshserver.go b/pkg/wshrpc/wshserver/wshserver.go index a5f10b9604..10e5949a0d 100644 --- a/pkg/wshrpc/wshserver/wshserver.go +++ b/pkg/wshrpc/wshserver/wshserver.go @@ -1262,7 +1262,7 @@ func (ws *WshServer) WaveAIGetToolDiffCommand(ctx context.Context, data wshrpc.C }, nil } -func (ws *WshServer) WaveAIGetModesCommand(ctx context.Context) ([]uctypes.ThinkingModeConfig, error) { +func (ws *WshServer) WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIThinkingModeConfig, error) { return aiusechat.WaveAIGetModes() } From 27e1ec58b9bc26f1ed0270798f2ccfc9fc5bcd72 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 16:25:35 -0800 Subject: [PATCH 07/31] capabilities... in thinkingmodeconfig... --- pkg/aiusechat/uctypes/usechat-types.go | 31 +++++++++++++-------- pkg/aiusechat/usechat-thinkingmode.go | 19 +++++++++++++ pkg/aiusechat/usechat.go | 38 ++++++++++++++++++++++---- 3 files changed, 71 insertions(+), 17 deletions(-) diff --git a/pkg/aiusechat/uctypes/usechat-types.go b/pkg/aiusechat/uctypes/usechat-types.go index fd905ca4cd..ffd78f5d2f 100644 --- a/pkg/aiusechat/uctypes/usechat-types.go +++ b/pkg/aiusechat/uctypes/usechat-types.go @@ -136,6 +136,12 @@ const ( ToolUseStatusCompleted = "completed" ) +const ( + AICapabilityTools = "tools" + AICapabilityImages = "images" + AICapabilityPdfs = "pdfs" +) + const ( ApprovalNeedsApproval = "needs-approval" ApprovalUserApproved = "user-approved" @@ -145,17 +151,20 @@ const ( ) type AIThinkingModeConfig struct { - Mode string `json:"mode"` - DisplayName string `json:"displayname"` - APIType string `json:"apitype"` - Model string `json:"model"` - ThinkingLevel string `json:"thinkinglevel"` - BaseURL string `json:"baseurl,omitempty"` - APIVersion string `json:"apiversion,omitempty"` - APIToken string `json:"apitoken,omitempty"` - Premium bool `json:"premium"` - Icon string `json:"icon"` - Description string `json:"description"` + Mode string `json:"mode"` + DisplayName string `json:"displayname"` + APIType string `json:"apitype"` + Model string `json:"model"` + ThinkingLevel string `json:"thinkinglevel"` + BaseURL string `json:"baseurl,omitempty"` + WaveAICloud bool `json:"waveaicloud,omitempty"` + APIVersion string `json:"apiversion,omitempty"` + APIToken string `json:"apitoken,omitempty"` + APITokenSecretName string `json:"apitokensecretname,omitempty"` + Premium bool `json:"premium"` + Icon string `json:"icon"` + Description string `json:"description"` + Capabilities []string `json:"capabilities,omitempty"` } // when updating this struct, also modify frontend/app/aipanel/aitypes.ts WaveUIDataTypes.tooluse diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index f67a0eab3a..f093826ce6 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -16,9 +16,11 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ APIType: APIType_OpenAI, Model: uctypes.DefaultOpenAIModel, ThinkingLevel: uctypes.ThinkingLevelLow, + WaveAICloud: true, Premium: false, Icon: "fa-bolt", Description: "Fastest responses (gpt-5-mini)", + Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, uctypes.ThinkingModeBalanced: { Mode: uctypes.ThinkingModeBalanced, @@ -26,9 +28,11 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ APIType: APIType_OpenAI, Model: uctypes.PremiumOpenAIModel, ThinkingLevel: uctypes.ThinkingLevelLow, + WaveAICloud: true, Premium: true, Icon: "fa-sparkles", 
Description: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", + Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, uctypes.ThinkingModeDeep: { Mode: uctypes.ThinkingModeDeep, @@ -36,9 +40,24 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ APIType: APIType_OpenAI, Model: uctypes.PremiumOpenAIModel, ThinkingLevel: uctypes.ThinkingLevelMedium, + WaveAICloud: true, Premium: true, Icon: "fa-lightbulb", Description: "Slower but most capable\n(gpt-5.1 with full reasoning)", + Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, + }, + "openrouter:mistral": { + Mode: "openrouter:mistral", + DisplayName: "Mistral (OpenRouter)", + APIType: APIType_OpenAIComp, + BaseURL: "https://openrouter.ai/api/v1/chat/completions", + Model: "mistralai/mistral-small-3.2-24b-instruct", + ThinkingLevel: uctypes.ThinkingLevelLow, + APITokenSecretName: "OPENROUTER_KEY", + Premium: false, + Icon: "fa-bolt", + Description: "Fast and capable via OpenRouter\n(Mistral Small 3.2)", + Capabilities: []string{uctypes.AICapabilityTools}, }, } diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 7bf8da3845..18f8e90cc7 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -19,6 +19,7 @@ import ( "github.com/google/uuid" "github.com/wavetermdev/waveterm/pkg/aiusechat/chatstore" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" + "github.com/wavetermdev/waveterm/pkg/secretstore" "github.com/wavetermdev/waveterm/pkg/telemetry" "github.com/wavetermdev/waveterm/pkg/telemetry/telemetrydata" "github.com/wavetermdev/waveterm/pkg/util/ds" @@ -41,6 +42,7 @@ const ( const DefaultAPI = APIType_OpenAI const DefaultMaxTokens = 4 * 1024 const BuilderMaxTokens = 24 * 1024 +const WaveAIEndpointEnvName = "WAVETERM_WAVEAI_ENDPOINT" var ( globalRateLimitInfo = &uctypes.RateLimitInfo{Unknown: true} @@ -97,10 +99,6 @@ var SystemPromptText_OpenAI = strings.Join([]string{ }, " ") func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo) (*uctypes.AIOptsType, error) { - baseUrl := uctypes.DefaultAIEndpoint - if os.Getenv("WAVETERM_WAVEAI_ENDPOINT") != "" { - baseUrl = os.Getenv("WAVETERM_WAVEAI_ENDPOINT") - } maxTokens := DefaultMaxTokens if builderMode { maxTokens = BuilderMaxTokens @@ -123,14 +121,42 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo return nil, err } - return &uctypes.AIOptsType{ + apiToken := config.APIToken + if apiToken == "" && config.APITokenSecretName != "" { + secret, exists, err := secretstore.GetSecret(config.APITokenSecretName) + if err != nil { + return nil, fmt.Errorf("failed to retrieve secret %s: %w", config.APITokenSecretName, err) + } + if !exists || secret == "" { + return nil, fmt.Errorf("secret %s not found or empty", config.APITokenSecretName) + } + apiToken = secret + } + + var baseUrl string + if config.WaveAICloud { + baseUrl = uctypes.DefaultAIEndpoint + if os.Getenv(WaveAIEndpointEnvName) != "" { + baseUrl = os.Getenv(WaveAIEndpointEnvName) + } + } else if config.BaseURL != "" { + baseUrl = config.BaseURL + } else { + return nil, fmt.Errorf("no BaseURL configured for thinking mode %s", thinkingMode) + } + + opts := &uctypes.AIOptsType{ APIType: config.APIType, Model: config.Model, MaxTokens: maxTokens, ThinkingLevel: config.ThinkingLevel, ThinkingMode: thinkingMode, BaseURL: baseUrl, - }, nil + } + if apiToken != "" { + opts.APIToken = apiToken + } + return opts, nil 
} func shouldUseChatCompletionsAPI(model string) bool { From c6f2f7239c0721495aaa6732c0d0ab9fddfb6df8 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 17:38:59 -0800 Subject: [PATCH 08/31] use makeiconclass --- frontend/app/aipanel/thinkingmode.tsx | 6 +++--- pkg/aiusechat/usechat-thinkingmode.go | 8 ++++---- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx index a941be1641..64a150d63b 100644 --- a/frontend/app/aipanel/thinkingmode.tsx +++ b/frontend/app/aipanel/thinkingmode.tsx @@ -2,7 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 import { atoms } from "@/app/store/global"; -import { cn } from "@/util/util"; +import { cn, makeIconClass } from "@/util/util"; import { useAtomValue } from "jotai"; import { memo, useRef, useState } from "react"; import { WaveAIModel } from "./waveai-model"; @@ -62,7 +62,7 @@ export const ThinkingLevelDropdown = memo(() => { )} title={`Thinking: ${displayConfig.displayname}`} > - + {displayConfig.displayname} @@ -94,7 +94,7 @@ export const ThinkingLevelDropdown = memo(() => { } transition-colors text-left`} >
- + {config.displayname} {isDisabled && " (premium)"} diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index f093826ce6..d31034593f 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -18,7 +18,7 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ ThinkingLevel: uctypes.ThinkingLevelLow, WaveAICloud: true, Premium: false, - Icon: "fa-bolt", + Icon: "bolt", Description: "Fastest responses (gpt-5-mini)", Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, @@ -30,7 +30,7 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ ThinkingLevel: uctypes.ThinkingLevelLow, WaveAICloud: true, Premium: true, - Icon: "fa-sparkles", + Icon: "sparkles", Description: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, @@ -42,7 +42,7 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ ThinkingLevel: uctypes.ThinkingLevelMedium, WaveAICloud: true, Premium: true, - Icon: "fa-lightbulb", + Icon: "lightbulb", Description: "Slower but most capable\n(gpt-5.1 with full reasoning)", Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, @@ -55,7 +55,7 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ ThinkingLevel: uctypes.ThinkingLevelLow, APITokenSecretName: "OPENROUTER_KEY", Premium: false, - Icon: "fa-bolt", + Icon: "bolt", Description: "Fast and capable via OpenRouter\n(Mistral Small 3.2)", Capabilities: []string{uctypes.AICapabilityTools}, }, From 12344e3128bd790deca11b0991bc4dc53a510a65 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 17:50:10 -0800 Subject: [PATCH 09/31] rename usechat-types.go (for AI). change display fields to have more consistent names --- frontend/app/aipanel/aitypes.ts | 4 ++-- frontend/app/aipanel/thinkingmode.tsx | 11 ++++++----- frontend/types/gotypes.d.ts | 8 ++++++-- .../uctypes/{usechat-types.go => uctypes.go} | 5 +++-- pkg/aiusechat/usechat-thinkingmode.go | 15 +++++++++++---- 5 files changed, 28 insertions(+), 15 deletions(-) rename pkg/aiusechat/uctypes/{usechat-types.go => uctypes.go} (99%) diff --git a/frontend/app/aipanel/aitypes.ts b/frontend/app/aipanel/aitypes.ts index a1192ec7ed..cc3c73d224 100644 --- a/frontend/app/aipanel/aitypes.ts +++ b/frontend/app/aipanel/aitypes.ts @@ -4,14 +4,14 @@ import { ChatRequestOptions, FileUIPart, UIMessage, UIMessagePart } from "ai"; type WaveUIDataTypes = { - // pkg/aiusechat/uctypes/usechat-types.go UIMessageDataUserFile + // pkg/aiusechat/uctypes/uctypes.go UIMessageDataUserFile userfile: { filename: string; size: number; mimetype: string; previewurl?: string; }; - // pkg/aiusechat/uctypes/usechat-types.go UIMessageDataToolUse + // pkg/aiusechat/uctypes/uctypes.go UIMessageDataToolUse tooluse: { toolcallid: string; toolname: string; diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx index 64a150d63b..ff70addf39 100644 --- a/frontend/app/aipanel/thinkingmode.tsx +++ b/frontend/app/aipanel/thinkingmode.tsx @@ -60,11 +60,11 @@ export const ThinkingLevelDropdown = memo(() => { "group flex items-center gap-1.5 px-2 py-1 text-xs text-gray-300 hover:text-white rounded transition-colors cursor-pointer border border-gray-600/50", isOpen ? 
"bg-gray-700" : "bg-gray-800/50 hover:bg-gray-700" )} - title={`Thinking: ${displayConfig.displayname}`} + title={`Thinking: ${displayConfig["display:name"]}`} > - + - {displayConfig.displayname} + {displayConfig["display:name"]} @@ -74,6 +74,7 @@ export const ThinkingLevelDropdown = memo(() => {
setIsOpen(false)} />
{thinkingModeConfigs + .sort((a, b) => (a["display:order"] || 0) - (b["display:order"] || 0) || (a["display:name"] || "").localeCompare(b["display:name"] || "")) .filter((config) => !(hideQuick && config.mode === "quick")) .map((config, index, filteredConfigs) => { const isFirst = index === 0; @@ -94,9 +95,9 @@ export const ThinkingLevelDropdown = memo(() => { } transition-colors text-left`} >
- + - {config.displayname} + {config["display:name"]} {isDisabled && " (premium)"} {isSelected && } diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 884a0883ca..1996e0df56 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -16,16 +16,20 @@ declare global { // uctypes.AIThinkingModeConfig type AIThinkingModeConfig = { mode: string; - displayname: string; + "display:name": string; + "display:order"?: number; + "display:icon": string; apitype: string; model: string; thinkinglevel: string; baseurl?: string; + waveaicloud?: boolean; apiversion?: string; apitoken?: string; + apitokensecretname?: string; premium: boolean; - icon: string; description: string; + capabilities?: string[]; }; // wshrpc.ActivityDisplayType diff --git a/pkg/aiusechat/uctypes/usechat-types.go b/pkg/aiusechat/uctypes/uctypes.go similarity index 99% rename from pkg/aiusechat/uctypes/usechat-types.go rename to pkg/aiusechat/uctypes/uctypes.go index ffd78f5d2f..1dd4c1c807 100644 --- a/pkg/aiusechat/uctypes/usechat-types.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -152,7 +152,9 @@ const ( type AIThinkingModeConfig struct { Mode string `json:"mode"` - DisplayName string `json:"displayname"` + DisplayName string `json:"display:name"` + DisplayOrder float64 `json:"display:order,omitempty"` + DisplayIcon string `json:"display:icon"` APIType string `json:"apitype"` Model string `json:"model"` ThinkingLevel string `json:"thinkinglevel"` @@ -162,7 +164,6 @@ type AIThinkingModeConfig struct { APIToken string `json:"apitoken,omitempty"` APITokenSecretName string `json:"apitokensecretname,omitempty"` Premium bool `json:"premium"` - Icon string `json:"icon"` Description string `json:"description"` Capabilities []string `json:"capabilities,omitempty"` } diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index d31034593f..b383652efd 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -5,6 +5,7 @@ package aiusechat import ( "fmt" + "sort" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" ) @@ -13,36 +14,39 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ uctypes.ThinkingModeQuick: { Mode: uctypes.ThinkingModeQuick, DisplayName: "Quick", + DisplayOrder: -3, APIType: APIType_OpenAI, Model: uctypes.DefaultOpenAIModel, ThinkingLevel: uctypes.ThinkingLevelLow, WaveAICloud: true, Premium: false, - Icon: "bolt", + DisplayIcon: "bolt", Description: "Fastest responses (gpt-5-mini)", Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, uctypes.ThinkingModeBalanced: { Mode: uctypes.ThinkingModeBalanced, DisplayName: "Balanced", + DisplayOrder: -2, APIType: APIType_OpenAI, Model: uctypes.PremiumOpenAIModel, ThinkingLevel: uctypes.ThinkingLevelLow, WaveAICloud: true, Premium: true, - Icon: "sparkles", + DisplayIcon: "sparkles", Description: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, uctypes.ThinkingModeDeep: { Mode: uctypes.ThinkingModeDeep, DisplayName: "Deep", + DisplayOrder: -1, APIType: APIType_OpenAI, Model: uctypes.PremiumOpenAIModel, ThinkingLevel: uctypes.ThinkingLevelMedium, WaveAICloud: true, Premium: true, - Icon: "lightbulb", + DisplayIcon: "lightbulb", Description: "Slower but most capable\n(gpt-5.1 with full reasoning)", Capabilities: []string{uctypes.AICapabilityTools, 
uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, }, @@ -55,7 +59,7 @@ var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ ThinkingLevel: uctypes.ThinkingLevelLow, APITokenSecretName: "OPENROUTER_KEY", Premium: false, - Icon: "bolt", + DisplayIcon: "bolt", Description: "Fast and capable via OpenRouter\n(Mistral Small 3.2)", Capabilities: []string{uctypes.AICapabilityTools}, }, @@ -76,5 +80,8 @@ func WaveAIGetModes() ([]uctypes.AIThinkingModeConfig, error) { for _, config := range thinkingModeConfigs { modes = append(modes, config) } + sort.Slice(modes, func(i, j int) bool { + return modes[i].DisplayOrder < modes[j].DisplayOrder + }) return modes, nil } From 091cb0c54c171cc52fbbebcb0e688c1d01875a6b Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 17:50:34 -0800 Subject: [PATCH 10/31] remove unused const + format --- frontend/app/aipanel/thinkingmode.tsx | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx index ff70addf39..98b2a33115 100644 --- a/frontend/app/aipanel/thinkingmode.tsx +++ b/frontend/app/aipanel/thinkingmode.tsx @@ -7,8 +7,6 @@ import { useAtomValue } from "jotai"; import { memo, useRef, useState } from "react"; import { WaveAIModel } from "./waveai-model"; -type ThinkingMode = "quick" | "balanced" | "deep"; - export const ThinkingLevelDropdown = memo(() => { const model = WaveAIModel.getInstance(); const thinkingMode = useAtomValue(model.thinkingMode); @@ -20,10 +18,13 @@ export const ThinkingLevelDropdown = memo(() => { const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; const hideQuick = model.inBuilder && hasPremium; - const configsMap = thinkingModeConfigs.reduce((acc, config) => { - acc[config.mode] = config; - return acc; - }, {} as Record); + const configsMap = thinkingModeConfigs.reduce( + (acc, config) => { + acc[config.mode] = config; + return acc; + }, + {} as Record + ); const handleSelect = (mode: string) => { const config = configsMap[mode]; @@ -74,7 +75,11 @@ export const ThinkingLevelDropdown = memo(() => {
setIsOpen(false)} />
{thinkingModeConfigs - .sort((a, b) => (a["display:order"] || 0) - (b["display:order"] || 0) || (a["display:name"] || "").localeCompare(b["display:name"] || "")) + .sort( + (a, b) => + (a["display:order"] || 0) - (b["display:order"] || 0) || + (a["display:name"] || "").localeCompare(b["display:name"] || "") + ) .filter((config) => !(hideQuick && config.mode === "quick")) .map((config, index, filteredConfigs) => { const isFirst = index === 0; From 3399a6ef4a9dad4f6a2c6931bc953fea15da2866 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 18:07:43 -0800 Subject: [PATCH 11/31] add tools, check capabilities --- .../openaicomp/openaicomp-convertmessage.go | 47 +++++++++++++-- pkg/aiusechat/tools_screenshot.go | 1 + pkg/aiusechat/uctypes/uctypes.go | 59 +++++++++++++------ pkg/aiusechat/usechat.go | 1 + 4 files changed, 84 insertions(+), 24 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index c697a4cd4b..d11af49c73 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -32,6 +32,31 @@ func appendToLastUserMessage(messages []CompletionsMessage, text string) { } } +// convertToolDefinitions converts Wave ToolDefinitions to OpenAI format +// Only includes tools whose required capabilities are met +func convertToolDefinitions(waveTools []uctypes.ToolDefinition, capabilities []string) []ToolDefinition { + if len(waveTools) == 0 { + return nil + } + + openaiTools := make([]ToolDefinition, 0, len(waveTools)) + for _, waveTool := range waveTools { + if !waveTool.HasRequiredCapabilities(capabilities) { + continue + } + openaiTool := ToolDefinition{ + Type: "function", + Function: ToolFunctionDef{ + Name: waveTool.Name, + Description: waveTool.Description, + Parameters: waveTool.InputSchema, + }, + } + openaiTools = append(openaiTools, openaiTool) + } + return openaiTools +} + // buildCompletionsHTTPRequest creates an HTTP request for the OpenAI completions API func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMessage, chatOpts uctypes.WaveChatOpts) (*http.Request, error) { opts := chatOpts.Config @@ -77,8 +102,18 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess reqBody.MaxTokens = maxTokens } + // Add tool definitions if tools capability is available and tools exist + var allTools []uctypes.ToolDefinition + if opts.HasCapability(uctypes.AICapabilityTools) { + allTools = append(allTools, chatOpts.Tools...) + allTools = append(allTools, chatOpts.TabTools...) 
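// NOTE (editorial sketch, not part of the diff): chatOpts.Tools holds the
// chat-level tool definitions and chatOpts.TabTools the per-tab ones. Both
// are filtered per-tool by HasRequiredCapabilities inside
// convertToolDefinitions, so e.g. the screenshot tool below (which declares
// RequiredCapabilities: ["images"]) is silently dropped for models whose
// config lacks the "images" capability.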
+ if len(allTools) > 0 { + reqBody.Tools = convertToolDefinitions(allTools, opts.Capabilities) + } + } + if wavebase.IsDevMode() { - log.Printf("openaicomp: model %s, messages: %d\n", opts.Model, len(messages)) + log.Printf("openaicomp: model %s, messages: %d, tools: %d\n", opts.Model, len(messages), len(allTools)) } buf, err := json.Marshal(reqBody) @@ -120,11 +155,11 @@ func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*Completions firstText := true for _, part := range aiMsg.Parts { var partText string - + switch { case part.Type == uctypes.AIMessagePartTypeText: partText = part.Text - + case part.MimeType == "text/plain": textData, err := aiutil.ExtractTextData(part.Data, part.URL) if err != nil { @@ -132,18 +167,18 @@ func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*Completions continue } partText = aiutil.FormatAttachedTextFile(part.FileName, textData) - + case part.MimeType == "directory": if len(part.Data) == 0 { log.Printf("openaicomp: directory listing part missing data for %s\n", part.FileName) continue } partText = aiutil.FormatAttachedDirectoryListing(part.FileName, string(part.Data)) - + default: continue } - + if partText != "" { if !firstText { textBuilder.WriteString("\n\n") diff --git a/pkg/aiusechat/tools_screenshot.go b/pkg/aiusechat/tools_screenshot.go index 4c924db292..9df5a18f0e 100644 --- a/pkg/aiusechat/tools_screenshot.go +++ b/pkg/aiusechat/tools_screenshot.go @@ -67,6 +67,7 @@ func GetCaptureScreenshotToolDefinition(tabId string) uctypes.ToolDefinition { "required": []string{"widget_id"}, "additionalProperties": false, }, + RequiredCapabilities: []string{uctypes.AICapabilityImages}, ToolCallDesc: func(input any, output any, toolUseData *uctypes.UIMessageDataToolUse) string { inputMap, ok := input.(map[string]any) if !ok { diff --git a/pkg/aiusechat/uctypes/uctypes.go b/pkg/aiusechat/uctypes/uctypes.go index 1dd4c1c807..fc76c576cd 100644 --- a/pkg/aiusechat/uctypes/uctypes.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -6,6 +6,7 @@ package uctypes import ( "fmt" "net/url" + "slices" "strings" ) @@ -78,13 +79,14 @@ type UIMessageDataUserFile struct { // ToolDefinition represents a tool that can be used by the AI model type ToolDefinition struct { - Name string `json:"name"` - DisplayName string `json:"displayname,omitempty"` // internal field (cannot marshal to API, must be stripped) - Description string `json:"description"` - ShortDescription string `json:"shortdescription,omitempty"` // internal field (cannot marshal to API, must be stripped) - ToolLogName string `json:"-"` // short name for telemetry (e.g., "term:getscrollback") - InputSchema map[string]any `json:"input_schema"` - Strict bool `json:"strict,omitempty"` + Name string `json:"name"` + DisplayName string `json:"displayname,omitempty"` // internal field (cannot marshal to API, must be stripped) + Description string `json:"description"` + ShortDescription string `json:"shortdescription,omitempty"` // internal field (cannot marshal to API, must be stripped) + ToolLogName string `json:"-"` // short name for telemetry (e.g., "term:getscrollback") + InputSchema map[string]any `json:"input_schema"` + Strict bool `json:"strict,omitempty"` + RequiredCapabilities []string `json:"requiredcapabilities,omitempty"` ToolTextCallback func(any) (string, error) `json:"-"` ToolAnyCallback func(any, *UIMessageDataToolUse) (any, error) `json:"-"` // *UIMessageDataToolUse will NOT be nil @@ -114,6 +116,18 @@ func (td *ToolDefinition) Desc() string { return td.Description } +func (td 
*ToolDefinition) HasRequiredCapabilities(capabilities []string) bool { + if td == nil || len(td.RequiredCapabilities) == 0 { + return true + } + for _, reqCap := range td.RequiredCapabilities { + if !slices.Contains(capabilities, reqCap) { + return false + } + } + return true +} + //------------------ // Wave specific types, stop reasons, tool calls, config // these are used internally to coordinate the calls/steps @@ -168,6 +182,10 @@ type AIThinkingModeConfig struct { Capabilities []string `json:"capabilities,omitempty"` } +func (c *AIThinkingModeConfig) HasCapability(cap string) bool { + return slices.Contains(c.Capabilities, cap) +} + // when updating this struct, also modify frontend/app/aipanel/aitypes.ts WaveUIDataTypes.tooluse type UIMessageDataToolUse struct { ToolCallId string `json:"toolcallid"` @@ -230,17 +248,18 @@ type WaveContinueResponse struct { // Wave Specific AI opts for configuration type AIOptsType struct { - APIType string `json:"apitype,omitempty"` - Model string `json:"model"` - APIToken string `json:"apitoken"` - OrgID string `json:"orgid,omitempty"` - APIVersion string `json:"apiversion,omitempty"` - BaseURL string `json:"baseurl,omitempty"` - ProxyURL string `json:"proxyurl,omitempty"` - MaxTokens int `json:"maxtokens,omitempty"` - TimeoutMs int `json:"timeoutms,omitempty"` - ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh - ThinkingMode string `json:"thinkingmode,omitempty"` // quick, balanced, or deep + APIType string `json:"apitype,omitempty"` + Model string `json:"model"` + APIToken string `json:"apitoken"` + OrgID string `json:"orgid,omitempty"` + APIVersion string `json:"apiversion,omitempty"` + BaseURL string `json:"baseurl,omitempty"` + ProxyURL string `json:"proxyurl,omitempty"` + MaxTokens int `json:"maxtokens,omitempty"` + TimeoutMs int `json:"timeoutms,omitempty"` + ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh + ThinkingMode string `json:"thinkingmode,omitempty"` // quick, balanced, or deep + Capabilities []string `json:"capabilities,omitempty"` } func (opts AIOptsType) IsWaveProxy() bool { @@ -251,6 +270,10 @@ func (opts AIOptsType) IsPremiumModel() bool { return opts.Model == "gpt-5" || opts.Model == "gpt-5.1" || strings.Contains(opts.Model, "claude-sonnet") } +func (opts AIOptsType) HasCapability(cap string) bool { + return slices.Contains(opts.Capabilities, cap) +} + type AIChat struct { ChatId string `json:"chatid"` APIType string `json:"apitype"` diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 18f8e90cc7..b91d8879b7 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -152,6 +152,7 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo ThinkingLevel: config.ThinkingLevel, ThinkingMode: thinkingMode, BaseURL: baseUrl, + Capabilities: config.Capabilities, } if apiToken != "" { opts.APIToken = apiToken From 5b786c75432fe3fc26f256052dab4564e7213947 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 18:19:24 -0800 Subject: [PATCH 12/31] convert tool parts --- .../openaicomp/openaicomp-convertmessage.go | 49 ++++++++++++++++--- pkg/aiusechat/openaicomp/openaicomp-types.go | 28 +++++++++-- 2 files changed, 66 insertions(+), 11 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index d11af49c73..da49269c31 100644 --- 
a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -213,15 +213,48 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { continue } + var parts []uctypes.UIMessagePart + + // Add text content if present + if compMsg.Message.Content != "" { + parts = append(parts, uctypes.UIMessagePart{ + Type: "text", + Text: compMsg.Message.Content, + }) + } + + // Add tool calls if present (assistant requesting tool use) + if len(compMsg.Message.ToolCalls) > 0 { + for _, toolCall := range compMsg.Message.ToolCalls { + if toolCall.Type != "function" { + continue + } + + // Only add if ToolUseData is available + if toolCall.ToolUseData != nil { + parts = append(parts, uctypes.UIMessagePart{ + Type: "data-tooluse", + ID: toolCall.ID, + Data: *toolCall.ToolUseData, + }) + } + } + } + + // Tool result messages (role "tool") are not converted to UIMessage + if compMsg.Message.Role == "tool" && compMsg.Message.ToolCallID != "" { + continue + } + + // Skip messages with no parts + if len(parts) == 0 { + continue + } + uiMsg := uctypes.UIMessage{ - ID: compMsg.MessageId, - Role: compMsg.Message.Role, - Parts: []uctypes.UIMessagePart{ - { - Type: "text", - Text: compMsg.Message.Content, - }, - }, + ID: compMsg.MessageId, + Role: compMsg.Message.Role, + Parts: parts, } uiChat.Messages = append(uiChat.Messages, uiMsg) diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaicomp/openaicomp-types.go index 0a2a596243..3a22125dee 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-types.go +++ b/pkg/aiusechat/openaicomp/openaicomp-types.go @@ -28,6 +28,18 @@ type CompletionsMessage struct { Name string `json:"name,omitempty"` // tool name on role:"tool" } +func (cm *CompletionsMessage) clean() *CompletionsMessage { + if len(cm.ToolCalls) == 0 { + return cm + } + rtn := *cm + rtn.ToolCalls = make([]ToolCall, len(cm.ToolCalls)) + for i, tc := range cm.ToolCalls { + rtn.ToolCalls[i] = *tc.clean() + } + return &rtn +} + type ToolDefinition struct { Type string `json:"type"` // "function" Function ToolFunctionDef `json:"function"` @@ -40,9 +52,19 @@ type ToolFunctionDef struct { } type ToolCall struct { - ID string `json:"id"` - Type string `json:"type"` // "function" - Function ToolFunctionCall `json:"function"` + ID string `json:"id"` + Type string `json:"type"` // "function" + Function ToolFunctionCall `json:"function"` + ToolUseData *uctypes.UIMessageDataToolUse `json:"toolusedata,omitempty"` // Internal field (must be cleaned before sending to API) +} + +func (tc *ToolCall) clean() *ToolCall { + if tc.ToolUseData == nil { + return tc + } + rtn := *tc + rtn.ToolUseData = nil + return &rtn } type ToolFunctionCall struct { From a65a132cee118b0b08f63bb41e3bb338da9f212d Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 18:32:30 -0800 Subject: [PATCH 13/31] working on tools --- .../openaicomp/openaicomp-backend.go | 52 ++++++++++++++++++- pkg/aiusechat/openaicomp/openaicomp-types.go | 2 +- 2 files changed, 51 insertions(+), 3 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-backend.go b/pkg/aiusechat/openaicomp/openaicomp-backend.go index 7ef4823ae6..4255b8ccdf 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-backend.go +++ b/pkg/aiusechat/openaicomp/openaicomp-backend.go @@ -108,6 +108,7 @@ func processCompletionsStream( textID := uuid.New().String() var finishReason string textStarted := false + var toolCallsInProgress []ToolCall _ = sseHandler.AiMsgStart(msgID) _ = 
sseHandler.AiMsgStartStep() @@ -160,6 +161,31 @@ func processCompletionsStream( _ = sseHandler.AiMsgTextDelta(textID, choice.Delta.Content) } + if len(choice.Delta.ToolCalls) > 0 { + for _, tcDelta := range choice.Delta.ToolCalls { + idx := tcDelta.Index + for len(toolCallsInProgress) <= idx { + toolCallsInProgress = append(toolCallsInProgress, ToolCall{}) + } + + tc := &toolCallsInProgress[idx] + if tcDelta.ID != "" { + tc.ID = tcDelta.ID + } + if tcDelta.Type != "" { + tc.Type = tcDelta.Type + } + if tcDelta.Function != nil { + if tcDelta.Function.Name != "" { + tc.Function.Name = tcDelta.Function.Name + } + if tcDelta.Function.Arguments != "" { + tc.Function.Arguments += tcDelta.Function.Arguments + } + } + } + } + if choice.FinishReason != nil && *choice.FinishReason != "" { finishReason = *choice.FinishReason } @@ -168,18 +194,40 @@ func processCompletionsStream( stopKind := uctypes.StopKindDone if finishReason == "length" { stopKind = uctypes.StopKindMaxTokens + } else if finishReason == "tool_calls" { + stopKind = uctypes.StopKindToolUse + } + + var waveToolCalls []uctypes.WaveToolCall + if len(toolCallsInProgress) > 0 { + for _, tc := range toolCallsInProgress { + var inputJSON any + if tc.Function.Arguments != "" { + if err := json.Unmarshal([]byte(tc.Function.Arguments), &inputJSON); err != nil { + log.Printf("openaicomp: failed to parse tool call arguments: %v\n", err) + continue + } + } + waveToolCalls = append(waveToolCalls, uctypes.WaveToolCall{ + ID: tc.ID, + Name: tc.Function.Name, + Input: inputJSON, + }) + } } stopReason := &uctypes.WaveStopReason{ Kind: stopKind, RawReason: finishReason, + ToolCalls: waveToolCalls, } assistantMsg := &CompletionsChatMessage{ MessageId: msgID, Message: CompletionsMessage{ - Role: "assistant", - Content: textBuilder.String(), + Role: "assistant", + Content: textBuilder.String(), + ToolCalls: toolCallsInProgress, }, } diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaicomp/openaicomp-types.go index 3a22125dee..0a7e9699cf 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-types.go +++ b/pkg/aiusechat/openaicomp/openaicomp-types.go @@ -83,7 +83,7 @@ type StreamChunk struct { type StreamChoice struct { Index int `json:"index"` Delta ContentDelta `json:"delta"` - FinishReason *string `json:"finish_reason"` + FinishReason *string `json:"finish_reason"` // "stop", "length" | "tool_calls" | "content_filter" } // This is the important part: From 09fd9bb575eff70c21fc531db99772b9998e253f Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 20:21:24 -0800 Subject: [PATCH 14/31] convert to role "tool" messages --- .../openaicomp/openaicomp-convertmessage.go | 30 +++++++++++++++++++ pkg/aiusechat/usechat-backend.go | 2 +- pkg/aiusechat/usechat.go | 8 ++--- 3 files changed, 35 insertions(+), 5 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index da49269c31..9a19feecad 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -197,6 +197,36 @@ func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*Completions }, nil } +// ConvertToolResultsToNativeChatMessage converts tool results to OpenAI tool messages +func ConvertToolResultsToNativeChatMessage(toolResults []uctypes.AIToolResult) ([]uctypes.GenAIMessage, error) { + if len(toolResults) == 0 { + return nil, nil + } + + messages := make([]uctypes.GenAIMessage, 0, len(toolResults)) + for _, toolResult 
:= range toolResults { + var content string + if toolResult.ErrorText != "" { + content = fmt.Sprintf("Error: %s", toolResult.ErrorText) + } else { + content = toolResult.Text + } + + msg := &CompletionsChatMessage{ + MessageId: toolResult.ToolUseID, + Message: CompletionsMessage{ + Role: "tool", + ToolCallID: toolResult.ToolUseID, + Name: toolResult.ToolName, + Content: content, + }, + } + messages = append(messages, msg) + } + + return messages, nil +} + // ConvertAIChatToUIChat converts stored chat to UI format func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { uiChat := &uctypes.UIChat{ diff --git a/pkg/aiusechat/usechat-backend.go b/pkg/aiusechat/usechat-backend.go index 884a94013c..b9be2c59c9 100644 --- a/pkg/aiusechat/usechat-backend.go +++ b/pkg/aiusechat/usechat-backend.go @@ -145,7 +145,7 @@ func (b *openaiCompletionsBackend) UpdateToolUseData(chatId string, toolCallId s } func (b *openaiCompletionsBackend) ConvertToolResultsToNativeChatMessage(toolResults []uctypes.AIToolResult) ([]uctypes.GenAIMessage, error) { - return nil, fmt.Errorf("tools not supported in openai-comp backend") + return openaicomp.ConvertToolResultsToNativeChatMessage(toolResults) } func (b *openaiCompletionsBackend) ConvertAIMessageToNativeChatMessage(message uctypes.AIMessage) (uctypes.GenAIMessage, error) { diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index b91d8879b7..70bde3796f 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -422,7 +422,7 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha chatOpts.PlatformInfo = platformInfo } } - stopReason, rtnMessage, err := runAIChatStep(ctx, sseHandler, backend, chatOpts, cont) + stopReason, rtnMessages, err := runAIChatStep(ctx, sseHandler, backend, chatOpts, cont) metrics.RequestCount++ if chatOpts.Config.IsPremiumModel() { metrics.PremiumReqCount++ @@ -430,8 +430,8 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha if chatOpts.Config.IsWaveProxy() { metrics.ProxyReqCount++ } - if len(rtnMessage) > 0 { - usage := getUsage(rtnMessage) + if len(rtnMessages) > 0 { + usage := getUsage(rtnMessages) log.Printf("usage: input=%d output=%d websearch=%d\n", usage.InputTokens, usage.OutputTokens, usage.NativeWebSearchCount) metrics.Usage.InputTokens += usage.InputTokens metrics.Usage.OutputTokens += usage.OutputTokens @@ -450,7 +450,7 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha _ = sseHandler.AiMsgFinish("", nil) break } - for _, msg := range rtnMessage { + for _, msg := range rtnMessages { if msg != nil { chatstore.DefaultChatStore.PostMessage(chatOpts.ChatId, &chatOpts.Config, msg) } From 2644d9b1e9d3346515cf73b7a48381882403cf0d Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 20:35:52 -0800 Subject: [PATCH 15/31] fix so a return message has EITHER tool calls OR content --- .../openaicomp/openaicomp-backend.go | 21 ++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-backend.go b/pkg/aiusechat/openaicomp/openaicomp-backend.go index 4255b8ccdf..34d59d097f 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-backend.go +++ b/pkg/aiusechat/openaicomp/openaicomp-backend.go @@ -198,9 +198,16 @@ func processCompletionsStream( stopKind = uctypes.StopKindToolUse } + var validToolCalls []ToolCall + for _, tc := range toolCallsInProgress { + if tc.ID != "" && tc.Function.Name != "" { + validToolCalls = append(validToolCalls, tc) + } + } + 
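// NOTE (illustrative, not part of the diff): in the Chat Completions
// streaming protocol a tool call's id and function.name arrive on the first
// delta for a given index, and only function.arguments accumulates across
// later deltas, e.g. (hypothetical chunks):
//   {"index":0,"id":"call_1","type":"function","function":{"name":"adder","arguments":""}}
//   {"index":0,"function":{"arguments":"{\"a\":2,\"b\":2}"}}
// Entries still missing an id or name at this point were truncated
// mid-stream, which is why the filter above keeps only fully-formed calls.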
var waveToolCalls []uctypes.WaveToolCall - if len(toolCallsInProgress) > 0 { - for _, tc := range toolCallsInProgress { + if len(validToolCalls) > 0 { + for _, tc := range validToolCalls { var inputJSON any if tc.Function.Arguments != "" { if err := json.Unmarshal([]byte(tc.Function.Arguments), &inputJSON); err != nil { @@ -225,12 +232,16 @@ func processCompletionsStream( assistantMsg := &CompletionsChatMessage{ MessageId: msgID, Message: CompletionsMessage{ - Role: "assistant", - Content: textBuilder.String(), - ToolCalls: toolCallsInProgress, + Role: "assistant", }, } + if len(validToolCalls) > 0 { + assistantMsg.Message.ToolCalls = validToolCalls + } else { + assistantMsg.Message.Content = textBuilder.String() + } + if textStarted { _ = sseHandler.AiMsgTextEnd(textID) } From 597a67b69875a48c27645ebd2ea836afad6ce97a Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 20:49:04 -0800 Subject: [PATCH 16/31] implement two tool functions in the backend interface --- .../openaicomp/openaicomp-convertmessage.go | 52 +++++++++++++++++++ pkg/aiusechat/openaicomp/openaicomp-types.go | 31 +++++++++++ pkg/aiusechat/usechat-backend.go | 4 +- 3 files changed, 85 insertions(+), 2 deletions(-) diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index 9a19feecad..8d4d356597 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -14,6 +14,7 @@ import ( "strings" "github.com/wavetermdev/waveterm/pkg/aiusechat/aiutil" + "github.com/wavetermdev/waveterm/pkg/aiusechat/chatstore" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/wavebase" ) @@ -292,3 +293,54 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { return uiChat, nil } + +// GetFunctionCallInputByToolCallId searches for a tool call by ID in the chat history +func GetFunctionCallInputByToolCallId(aiChat uctypes.AIChat, toolCallId string) *uctypes.AIFunctionCallInput { + for _, genMsg := range aiChat.NativeMessages { + compMsg, ok := genMsg.(*CompletionsChatMessage) + if !ok { + continue + } + idx := compMsg.Message.FindToolCallIndex(toolCallId) + if idx == -1 { + continue + } + toolCall := compMsg.Message.ToolCalls[idx] + return &uctypes.AIFunctionCallInput{ + CallId: toolCall.ID, + Name: toolCall.Function.Name, + Arguments: toolCall.Function.Arguments, + ToolUseData: toolCall.ToolUseData, + } + } + return nil +} + +// UpdateToolUseData updates the ToolUseData for a specific tool call in the chat history +func UpdateToolUseData(chatId string, callId string, newToolUseData *uctypes.UIMessageDataToolUse) error { + chat := chatstore.DefaultChatStore.Get(chatId) + if chat == nil { + return fmt.Errorf("chat not found: %s", chatId) + } + + for _, genMsg := range chat.NativeMessages { + compMsg, ok := genMsg.(*CompletionsChatMessage) + if !ok { + continue + } + idx := compMsg.Message.FindToolCallIndex(callId) + if idx == -1 { + continue + } + updatedMsg := compMsg.Copy() + updatedMsg.Message.ToolCalls[idx].ToolUseData = newToolUseData + aiOpts := &uctypes.AIOptsType{ + APIType: chat.APIType, + Model: chat.Model, + APIVersion: chat.APIVersion, + } + return chatstore.DefaultChatStore.PostMessage(chatId, aiOpts, updatedMsg) + } + + return fmt.Errorf("tool call with callId %s not found in chat %s", callId, chatId) +} diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaicomp/openaicomp-types.go index 
0a7e9699cf..7b8a64445e 100644
--- a/pkg/aiusechat/openaicomp/openaicomp-types.go
+++ b/pkg/aiusechat/openaicomp/openaicomp-types.go
@@ -40,6 +40,15 @@ func (cm *CompletionsMessage) clean() *CompletionsMessage {
 	return &rtn
 }
 
+func (cm *CompletionsMessage) FindToolCallIndex(toolCallId string) int {
+	for i, tc := range cm.ToolCalls {
+		if tc.ID == toolCallId {
+			return i
+		}
+	}
+	return -1
+}
+
 type ToolDefinition struct {
 	Type     string          `json:"type"` // "function"
 	Function ToolFunctionDef `json:"function"`
@@ -138,3 +147,25 @@ func (m *CompletionsChatMessage) GetUsage() *uctypes.AIUsage {
 		OutputTokens: m.Usage.CompletionTokens,
 	}
 }
+
+func (m *CompletionsChatMessage) Copy() *CompletionsChatMessage {
+	if m == nil {
+		return nil
+	}
+	copy := *m
+	if len(m.Message.ToolCalls) > 0 {
+		copy.Message.ToolCalls = make([]ToolCall, len(m.Message.ToolCalls))
+		for i, tc := range m.Message.ToolCalls {
+			copy.Message.ToolCalls[i] = tc
+			if tc.ToolUseData != nil {
+				toolUseDataCopy := *tc.ToolUseData
+				copy.Message.ToolCalls[i].ToolUseData = &toolUseDataCopy
+			}
+		}
+	}
+	if m.Usage != nil {
+		usageCopy := *m.Usage
+		copy.Usage = &usageCopy
+	}
+	return &copy
+}
diff --git a/pkg/aiusechat/usechat-backend.go b/pkg/aiusechat/usechat-backend.go
index b9be2c59c9..16c12a337b 100644
--- a/pkg/aiusechat/usechat-backend.go
+++ b/pkg/aiusechat/usechat-backend.go
@@ -141,7 +141,7 @@ func (b *openaiCompletionsBackend) RunChatStep(
 }
 
 func (b *openaiCompletionsBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData *uctypes.UIMessageDataToolUse) error {
-	return fmt.Errorf("tools not supported in openai-comp backend")
+	return openaicomp.UpdateToolUseData(chatId, toolCallId, toolUseData)
 }
 
 func (b *openaiCompletionsBackend) ConvertToolResultsToNativeChatMessage(toolResults []uctypes.AIToolResult) ([]uctypes.GenAIMessage, error) {
@@ -153,7 +153,7 @@ func (b *openaiCompletionsBackend) ConvertAIMessageToNativeChatMessage(message u
 }
 
 func (b *openaiCompletionsBackend) GetFunctionCallInputByToolCallId(aiChat uctypes.AIChat, toolCallId string) *uctypes.AIFunctionCallInput {
-	return nil
+	return openaicomp.GetFunctionCallInputByToolCallId(aiChat, toolCallId)
 }
 
 func (b *openaiCompletionsBackend) ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) {

From f11cc8d5fb878ac472816ffc8f9c1d5fc1aa1edb Mon Sep 17 00:00:00 2001
From: sawka
Date: Mon, 24 Nov 2025 21:36:47 -0800
Subject: [PATCH 17/31] make sendToolProgress and createToolUseData generic

---
 pkg/aiusechat/aiutil/aiutil.go         | 82 ++++++++++++++++++++++++++
 pkg/aiusechat/openai/openai-backend.go | 79 ++-----------------------
 2 files changed, 86 insertions(+), 75 deletions(-)

diff --git a/pkg/aiusechat/aiutil/aiutil.go b/pkg/aiusechat/aiutil/aiutil.go
index aa66529fab..b8235d8419 100644
--- a/pkg/aiusechat/aiutil/aiutil.go
+++ b/pkg/aiusechat/aiutil/aiutil.go
@@ -5,6 +5,7 @@ package aiutil
 
 import (
 	"bytes"
+	"context"
 	"crypto/sha256"
 	"encoding/base64"
 	"encoding/hex"
@@ -12,9 +13,12 @@ import (
 	"fmt"
 	"strconv"
 	"strings"
+	"time"
 
 	"github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes"
 	"github.com/wavetermdev/waveterm/pkg/util/utilfn"
+	"github.com/wavetermdev/waveterm/pkg/wcore"
+	"github.com/wavetermdev/waveterm/pkg/web/sse"
 )
 
 // ExtractXmlAttribute extracts an attribute value from an XML-like tag.
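(For orientation — an editor's sketch, not part of the patch: the intended call pattern for the two helpers this commit extracts. It mirrors the call sites added to openai-backend.go later in this same patch.)

	// streamed function-call delta: parse accumulated partial JSON for progress
	aiutil.SendToolProgress(st.toolCallID, st.toolName, st.partialJSON, state.chatOpts, sse, true)
	// completed function call: full arguments, strict JSON parse
	aiutil.SendToolProgress(st.toolCallID, st.toolName, []byte(ev.Arguments), state.chatOpts, sse, false)
	// build the UI-facing tool-use record (desc, approval, widget block resolution)
	toolUseData := aiutil.CreateToolUseData(st.toolCallID, st.toolName, ev.Arguments, state.chatOpts)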
@@ -189,3 +193,81 @@ func IsOpenAIReasoningModel(model string) bool { strings.HasPrefix(m, "gpt-5") || strings.HasPrefix(m, "gpt-5.1") } + +// CreateToolUseData creates a UIMessageDataToolUse from tool call information +func CreateToolUseData(toolCallID, toolName string, arguments string, chatOpts uctypes.WaveChatOpts) *uctypes.UIMessageDataToolUse { + toolUseData := &uctypes.UIMessageDataToolUse{ + ToolCallId: toolCallID, + ToolName: toolName, + Status: uctypes.ToolUseStatusPending, + } + + toolDef := chatOpts.GetToolDefinition(toolName) + if toolDef == nil { + toolUseData.Status = uctypes.ToolUseStatusError + toolUseData.ErrorMessage = "tool not found" + return toolUseData + } + + var parsedArgs any + if err := json.Unmarshal([]byte(arguments), &parsedArgs); err != nil { + toolUseData.Status = uctypes.ToolUseStatusError + toolUseData.ErrorMessage = fmt.Sprintf("failed to parse tool arguments: %v", err) + return toolUseData + } + + if toolDef.ToolCallDesc != nil { + toolUseData.ToolDesc = toolDef.ToolCallDesc(parsedArgs, nil, nil) + } + + if toolDef.ToolApproval != nil { + toolUseData.Approval = toolDef.ToolApproval(parsedArgs) + } + + if chatOpts.TabId != "" { + if argsMap, ok := parsedArgs.(map[string]any); ok { + if widgetId, ok := argsMap["widget_id"].(string); ok && widgetId != "" { + ctx, cancelFn := context.WithTimeout(context.Background(), 2*time.Second) + defer cancelFn() + fullBlockId, err := wcore.ResolveBlockIdFromPrefix(ctx, chatOpts.TabId, widgetId) + if err == nil { + toolUseData.BlockId = fullBlockId + } + } + } + } + + return toolUseData +} + + +// SendToolProgress sends tool progress updates via SSE if the tool has a progress descriptor +func SendToolProgress(toolCallID, toolName string, jsonData []byte, chatOpts uctypes.WaveChatOpts, sseHandler *sse.SSEHandlerCh, usePartialParse bool) { + toolDef := chatOpts.GetToolDefinition(toolName) + if toolDef == nil || toolDef.ToolProgressDesc == nil { + return + } + + var parsedJSON any + var err error + if usePartialParse { + parsedJSON, err = utilfn.ParsePartialJson(jsonData) + } else { + err = json.Unmarshal(jsonData, &parsedJSON) + } + if err != nil { + return + } + + statusLines, err := toolDef.ToolProgressDesc(parsedJSON) + if err != nil { + return + } + + progressData := &uctypes.UIMessageDataToolProgress{ + ToolCallId: toolCallID, + ToolName: toolName, + StatusLines: statusLines, + } + _ = sseHandler.AiMsgData("data-toolprogress", "progress-"+toolCallID, progressData) +} diff --git a/pkg/aiusechat/openai/openai-backend.go b/pkg/aiusechat/openai/openai-backend.go index cced0dd06d..f25774554a 100644 --- a/pkg/aiusechat/openai/openai-backend.go +++ b/pkg/aiusechat/openai/openai-backend.go @@ -17,11 +17,11 @@ import ( "github.com/google/uuid" "github.com/launchdarkly/eventsource" + "github.com/wavetermdev/waveterm/pkg/aiusechat/aiutil" "github.com/wavetermdev/waveterm/pkg/aiusechat/chatstore" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/util/logutil" "github.com/wavetermdev/waveterm/pkg/util/utilfn" - "github.com/wavetermdev/waveterm/pkg/wcore" "github.com/wavetermdev/waveterm/pkg/web/sse" ) @@ -862,8 +862,7 @@ func handleOpenAIEvent( } if st := state.blockMap[ev.ItemId]; st != nil && st.kind == openaiBlockToolUse { st.partialJSON = append(st.partialJSON, []byte(ev.Delta)...) 
- toolDef := state.chatOpts.GetToolDefinition(st.toolName) - sendToolProgress(st, toolDef, sse, st.partialJSON, true) + aiutil.SendToolProgress(st.toolCallID, st.toolName, st.partialJSON, state.chatOpts, sse, true) } return nil, nil @@ -876,10 +875,9 @@ func handleOpenAIEvent( // Get the function call info from the block state if st := state.blockMap[ev.ItemId]; st != nil && st.kind == openaiBlockToolUse { - toolDef := state.chatOpts.GetToolDefinition(st.toolName) - toolUseData := createToolUseData(st.toolCallID, st.toolName, toolDef, ev.Arguments, state.chatOpts) + toolUseData := aiutil.CreateToolUseData(st.toolCallID, st.toolName, ev.Arguments, state.chatOpts) state.toolUseData[st.toolCallID] = toolUseData - sendToolProgress(st, toolDef, sse, []byte(ev.Arguments), false) + aiutil.SendToolProgress(st.toolCallID, st.toolName, []byte(ev.Arguments), state.chatOpts, sse, false) } return nil, nil @@ -936,75 +934,6 @@ func handleOpenAIEvent( } } -func sendToolProgress(st *openaiBlockState, toolDef *uctypes.ToolDefinition, sse *sse.SSEHandlerCh, jsonData []byte, usePartialParse bool) { - if toolDef == nil || toolDef.ToolProgressDesc == nil { - return - } - var parsedJSON any - var err error - if usePartialParse { - parsedJSON, err = utilfn.ParsePartialJson(jsonData) - } else { - err = json.Unmarshal(jsonData, &parsedJSON) - } - if err != nil { - return - } - statusLines, err := toolDef.ToolProgressDesc(parsedJSON) - if err != nil { - return - } - progressData := &uctypes.UIMessageDataToolProgress{ - ToolCallId: st.toolCallID, - ToolName: st.toolName, - StatusLines: statusLines, - } - _ = sse.AiMsgData("data-toolprogress", "progress-"+st.toolCallID, progressData) -} - -func createToolUseData(toolCallID, toolName string, toolDef *uctypes.ToolDefinition, arguments string, chatOpts uctypes.WaveChatOpts) *uctypes.UIMessageDataToolUse { - toolUseData := &uctypes.UIMessageDataToolUse{ - ToolCallId: toolCallID, - ToolName: toolName, - Status: uctypes.ToolUseStatusPending, - } - - if toolDef == nil { - toolUseData.Status = uctypes.ToolUseStatusError - toolUseData.ErrorMessage = "tool not found" - return toolUseData - } - - var parsedArgs any - if err := json.Unmarshal([]byte(arguments), &parsedArgs); err != nil { - toolUseData.Status = uctypes.ToolUseStatusError - toolUseData.ErrorMessage = fmt.Sprintf("failed to parse tool arguments: %v", err) - return toolUseData - } - - if toolDef.ToolCallDesc != nil { - toolUseData.ToolDesc = toolDef.ToolCallDesc(parsedArgs, nil, nil) - } - - if toolDef.ToolApproval != nil { - toolUseData.Approval = toolDef.ToolApproval(parsedArgs) - } - - if chatOpts.TabId != "" { - if argsMap, ok := parsedArgs.(map[string]any); ok { - if widgetId, ok := argsMap["widget_id"].(string); ok && widgetId != "" { - ctx, cancelFn := context.WithTimeout(context.Background(), 2*time.Second) - defer cancelFn() - fullBlockId, err := wcore.ResolveBlockIdFromPrefix(ctx, chatOpts.TabId, widgetId) - if err == nil { - toolUseData.BlockId = fullBlockId - } - } - } - } - - return toolUseData -} // extractMessageAndToolsFromResponse extracts the final OpenAI message and tool calls from the completed response func extractMessageAndToolsFromResponse(resp openaiResponse, state *openaiStreamingState) ([]*OpenAIChatMessage, []uctypes.WaveToolCall) { From b2862e38803f9d370245266347877eacc348e770 Mon Sep 17 00:00:00 2001 From: sawka Date: Mon, 24 Nov 2025 22:37:59 -0800 Subject: [PATCH 18/31] move toolusedata to the usechat pkg and out of the backends --- frontend/app/aipanel/aipanel.tsx | 2 ++ 
pkg/aiusechat/aiutil/aiutil.go | 4 +-- pkg/aiusechat/openai/openai-backend.go | 36 +++++-------------- .../openaicomp/openaicomp-convertmessage.go | 4 +-- pkg/aiusechat/usechat-backend.go | 8 ++--- pkg/aiusechat/usechat.go | 35 +++++++++++------- 6 files changed, 42 insertions(+), 47 deletions(-) diff --git a/frontend/app/aipanel/aipanel.tsx b/frontend/app/aipanel/aipanel.tsx index 79ae04fcc1..9ade5bf513 100644 --- a/frontend/app/aipanel/aipanel.tsx +++ b/frontend/app/aipanel/aipanel.tsx @@ -246,6 +246,8 @@ const AIPanelComponentInner = memo(() => { model.registerUseChatData(sendMessage, setMessages, status, stop); // console.log("AICHAT messages", messages); + (window as any).aichatmessages = messages; + (window as any).aichatstatus = status; const handleKeyDown = (waveEvent: WaveKeyboardEvent): boolean => { if (checkKeyPressed(waveEvent, "Cmd:k")) { diff --git a/pkg/aiusechat/aiutil/aiutil.go b/pkg/aiusechat/aiutil/aiutil.go index b8235d8419..0fd4854469 100644 --- a/pkg/aiusechat/aiutil/aiutil.go +++ b/pkg/aiusechat/aiutil/aiutil.go @@ -195,8 +195,8 @@ func IsOpenAIReasoningModel(model string) bool { } // CreateToolUseData creates a UIMessageDataToolUse from tool call information -func CreateToolUseData(toolCallID, toolName string, arguments string, chatOpts uctypes.WaveChatOpts) *uctypes.UIMessageDataToolUse { - toolUseData := &uctypes.UIMessageDataToolUse{ +func CreateToolUseData(toolCallID, toolName string, arguments string, chatOpts uctypes.WaveChatOpts) uctypes.UIMessageDataToolUse { + toolUseData := uctypes.UIMessageDataToolUse{ ToolCallId: toolCallID, ToolName: toolName, Status: uctypes.ToolUseStatusPending, diff --git a/pkg/aiusechat/openai/openai-backend.go b/pkg/aiusechat/openai/openai-backend.go index f25774554a..356384f579 100644 --- a/pkg/aiusechat/openai/openai-backend.go +++ b/pkg/aiusechat/openai/openai-backend.go @@ -9,7 +9,6 @@ import ( "errors" "fmt" "io" - "log" "net/http" "net/url" "strings" @@ -396,8 +395,7 @@ type openaiBlockState struct { } type openaiStreamingState struct { - blockMap map[string]*openaiBlockState // Use item_id as key for UI streaming - toolUseData map[string]*uctypes.UIMessageDataToolUse // Use toolCallId as key + blockMap map[string]*openaiBlockState // Use item_id as key for UI streaming msgID string model string stepStarted bool @@ -407,7 +405,7 @@ type openaiStreamingState struct { // ---------- Public entrypoint ---------- -func UpdateToolUseData(chatId string, callId string, newToolUseData *uctypes.UIMessageDataToolUse) error { +func UpdateToolUseData(chatId string, callId string, newToolUseData uctypes.UIMessageDataToolUse) error { chat := chatstore.DefaultChatStore.Get(chatId) if chat == nil { return fmt.Errorf("chat not found: %s", chatId) @@ -422,7 +420,7 @@ func UpdateToolUseData(chatId string, callId string, newToolUseData *uctypes.UIM if chatMsg.FunctionCall != nil && chatMsg.FunctionCall.CallId == callId { updatedMsg := *chatMsg updatedFunctionCall := *chatMsg.FunctionCall - updatedFunctionCall.ToolUseData = newToolUseData + updatedFunctionCall.ToolUseData = &newToolUseData updatedMsg.FunctionCall = &updatedFunctionCall aiOpts := &uctypes.AIOptsType{ @@ -592,9 +590,8 @@ func parseOpenAIHTTPError(resp *http.Response) error { func handleOpenAIStreamingResp(ctx context.Context, sse *sse.SSEHandlerCh, decoder *eventsource.Decoder, cont *uctypes.WaveContinueResponse, chatOpts uctypes.WaveChatOpts) (*uctypes.WaveStopReason, []*OpenAIChatMessage) { // Per-response state state := &openaiStreamingState{ - blockMap: 
map[string]*openaiBlockState{}, - toolUseData: map[string]*uctypes.UIMessageDataToolUse{}, - chatOpts: chatOpts, + blockMap: map[string]*openaiBlockState{}, + chatOpts: chatOpts, } var rtnStopReason *uctypes.WaveStopReason @@ -875,8 +872,6 @@ func handleOpenAIEvent( // Get the function call info from the block state if st := state.blockMap[ev.ItemId]; st != nil && st.kind == openaiBlockToolUse { - toolUseData := aiutil.CreateToolUseData(st.toolCallID, st.toolName, ev.Arguments, state.chatOpts) - state.toolUseData[st.toolCallID] = toolUseData aiutil.SendToolProgress(st.toolCallID, st.toolName, []byte(ev.Arguments), state.chatOpts, sse, false) } return nil, nil @@ -934,7 +929,6 @@ func handleOpenAIEvent( } } - // extractMessageAndToolsFromResponse extracts the final OpenAI message and tool calls from the completed response func extractMessageAndToolsFromResponse(resp openaiResponse, state *openaiStreamingState) ([]*OpenAIChatMessage, []uctypes.WaveToolCall) { var messageContent []OpenAIMessageContent @@ -969,13 +963,6 @@ func extractMessageAndToolsFromResponse(resp openaiResponse, state *openaiStream } } - // Attach UIToolUseData if available - if data, ok := state.toolUseData[outputItem.CallId]; ok { - toolCall.ToolUseData = data - } else { - log.Printf("AI no data-tooluse for %s (callid: %s)\n", outputItem.Id, outputItem.CallId) - } - toolCalls = append(toolCalls, toolCall) // Create separate FunctionCall message @@ -983,18 +970,13 @@ func extractMessageAndToolsFromResponse(resp openaiResponse, state *openaiStream if outputItem.Arguments != "" { argsStr = outputItem.Arguments } - var toolUseDataPtr *uctypes.UIMessageDataToolUse - if data, ok := state.toolUseData[outputItem.CallId]; ok { - toolUseDataPtr = data - } functionCallMsg := &OpenAIChatMessage{ MessageId: uuid.New().String(), FunctionCall: &OpenAIFunctionCallInput{ - Type: "function_call", - CallId: outputItem.CallId, - Name: outputItem.Name, - Arguments: argsStr, - ToolUseData: toolUseDataPtr, + Type: "function_call", + CallId: outputItem.CallId, + Name: outputItem.Name, + Arguments: argsStr, }, } messages = append(messages, functionCallMsg) diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index 8d4d356597..5116974ae7 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -317,7 +317,7 @@ func GetFunctionCallInputByToolCallId(aiChat uctypes.AIChat, toolCallId string) } // UpdateToolUseData updates the ToolUseData for a specific tool call in the chat history -func UpdateToolUseData(chatId string, callId string, newToolUseData *uctypes.UIMessageDataToolUse) error { +func UpdateToolUseData(chatId string, callId string, newToolUseData uctypes.UIMessageDataToolUse) error { chat := chatstore.DefaultChatStore.Get(chatId) if chat == nil { return fmt.Errorf("chat not found: %s", chatId) @@ -333,7 +333,7 @@ func UpdateToolUseData(chatId string, callId string, newToolUseData *uctypes.UIM continue } updatedMsg := compMsg.Copy() - updatedMsg.Message.ToolCalls[idx].ToolUseData = newToolUseData + updatedMsg.Message.ToolCalls[idx].ToolUseData = &newToolUseData aiOpts := &uctypes.AIOptsType{ APIType: chat.APIType, Model: chat.Model, diff --git a/pkg/aiusechat/usechat-backend.go b/pkg/aiusechat/usechat-backend.go index 16c12a337b..71cbdccd72 100644 --- a/pkg/aiusechat/usechat-backend.go +++ b/pkg/aiusechat/usechat-backend.go @@ -29,7 +29,7 @@ type UseChatBackend interface { // UpdateToolUseData updates 
the tool use data for a specific tool call in the chat. // This is used to update the UI state for tool execution (approval status, results, etc.) - UpdateToolUseData(chatId string, toolCallId string, toolUseData *uctypes.UIMessageDataToolUse) error + UpdateToolUseData(chatId string, toolCallId string, toolUseData uctypes.UIMessageDataToolUse) error // ConvertToolResultsToNativeChatMessage converts tool execution results into native chat messages // that can be sent back to the AI backend. Returns a slice of messages (some backends may @@ -86,7 +86,7 @@ func (b *openaiResponsesBackend) RunChatStep( return stopReason, genMsgs, rateLimitInfo, err } -func (b *openaiResponsesBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData *uctypes.UIMessageDataToolUse) error { +func (b *openaiResponsesBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData uctypes.UIMessageDataToolUse) error { return openai.UpdateToolUseData(chatId, toolCallId, toolUseData) } @@ -140,7 +140,7 @@ func (b *openaiCompletionsBackend) RunChatStep( return stopReason, genMsgs, rateLimitInfo, err } -func (b *openaiCompletionsBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData *uctypes.UIMessageDataToolUse) error { +func (b *openaiCompletionsBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData uctypes.UIMessageDataToolUse) error { return openaicomp.UpdateToolUseData(chatId, toolCallId, toolUseData) } @@ -173,7 +173,7 @@ func (b *anthropicBackend) RunChatStep( return stopReason, []uctypes.GenAIMessage{msg}, rateLimitInfo, err } -func (b *anthropicBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData *uctypes.UIMessageDataToolUse) error { +func (b *anthropicBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData uctypes.UIMessageDataToolUse) error { return fmt.Errorf("UpdateToolUseData not implemented for anthropic backend") } diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 70bde3796f..e255b86da7 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -17,6 +17,7 @@ import ( "time" "github.com/google/uuid" + "github.com/wavetermdev/waveterm/pkg/aiusechat/aiutil" "github.com/wavetermdev/waveterm/pkg/aiusechat/chatstore" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/secretstore" @@ -239,7 +240,7 @@ func GetChatUsage(chat *uctypes.AIChat) uctypes.AIUsage { return usage } -func updateToolUseDataInChat(backend UseChatBackend, chatOpts uctypes.WaveChatOpts, toolCallID string, toolUseData *uctypes.UIMessageDataToolUse) { +func updateToolUseDataInChat(backend UseChatBackend, chatOpts uctypes.WaveChatOpts, toolCallID string, toolUseData uctypes.UIMessageDataToolUse) { if err := backend.UpdateToolUseData(chatOpts.ChatId, toolCallID, toolUseData); err != nil { log.Printf("failed to update tool use data in chat: %v\n", err) } @@ -279,7 +280,7 @@ func processToolCallInternal(backend UseChatBackend, toolCall uctypes.WaveToolCa } // ToolVerifyInput can modify the toolusedata. re-send it here. 
_ = sseHandler.AiMsgData("data-tooluse", toolCall.ID, *toolCall.ToolUseData) - updateToolUseDataInChat(backend, chatOpts, toolCall.ID, toolCall.ToolUseData) + updateToolUseDataInChat(backend, chatOpts, toolCall.ID, *toolCall.ToolUseData) } if toolCall.ToolUseData.Approval == uctypes.ApprovalNeedsApproval { @@ -308,7 +309,7 @@ func processToolCallInternal(backend UseChatBackend, toolCall uctypes.WaveToolCa // this still happens here because we need to update the FE to say the tool call was approved _ = sseHandler.AiMsgData("data-tooluse", toolCall.ID, *toolCall.ToolUseData) - updateToolUseDataInChat(backend, chatOpts, toolCall.ID, toolCall.ToolUseData) + updateToolUseDataInChat(backend, chatOpts, toolCall.ID, *toolCall.ToolUseData) } toolCall.ToolUseData.RunTs = time.Now().UnixMilli() @@ -344,7 +345,7 @@ func processToolCall(backend UseChatBackend, toolCall uctypes.WaveToolCall, chat if toolCall.ToolUseData != nil { _ = sseHandler.AiMsgData("data-tooluse", toolCall.ID, *toolCall.ToolUseData) - updateToolUseDataInChat(backend, chatOpts, toolCall.ID, toolCall.ToolUseData) + updateToolUseDataInChat(backend, chatOpts, toolCall.ID, *toolCall.ToolUseData) } return result @@ -356,17 +357,27 @@ func processToolCalls(backend UseChatBackend, stopReason *uctypes.WaveStopReason defer activeToolMap.Delete(toolCall.ID) } - // Send all data-tooluse packets at the beginning - for _, toolCall := range stopReason.ToolCalls { - if toolCall.ToolUseData != nil { - log.Printf("AI data-tooluse %s\n", toolCall.ID) - _ = sseHandler.AiMsgData("data-tooluse", toolCall.ID, *toolCall.ToolUseData) - updateToolUseDataInChat(backend, chatOpts, toolCall.ID, toolCall.ToolUseData) - if toolCall.ToolUseData.Approval == uctypes.ApprovalNeedsApproval && chatOpts.RegisterToolApproval != nil { - chatOpts.RegisterToolApproval(toolCall.ID) + // Create and send all data-tooluse packets at the beginning + for i := range stopReason.ToolCalls { + toolCall := &stopReason.ToolCalls[i] + // Create toolUseData from the tool call input + var argsJSON string + if toolCall.Input != nil { + argsBytes, err := json.Marshal(toolCall.Input) + if err == nil { + argsJSON = string(argsBytes) } } + toolUseData := aiutil.CreateToolUseData(toolCall.ID, toolCall.Name, argsJSON, chatOpts) + stopReason.ToolCalls[i].ToolUseData = &toolUseData + log.Printf("AI data-tooluse %s\n", toolCall.ID) + _ = sseHandler.AiMsgData("data-tooluse", toolCall.ID, toolUseData) + updateToolUseDataInChat(backend, chatOpts, toolCall.ID, toolUseData) + if toolUseData.Approval == uctypes.ApprovalNeedsApproval && chatOpts.RegisterToolApproval != nil { + chatOpts.RegisterToolApproval(toolCall.ID) + } } + // At this point, all ToolCalls are guaranteed to have non-nil ToolUseData var toolResults []uctypes.AIToolResult for _, toolCall := range stopReason.ToolCalls { From 2d1567ce2a3ac250356ccb6dcaf3410a5186fd37 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 11:43:23 -0800 Subject: [PATCH 19/31] get tools working --- go.mod | 1 + go.sum | 2 ++ pkg/aiusechat/openaicomp/openaicomp-backend.go | 13 +++++++++---- 3 files changed, 12 insertions(+), 4 deletions(-) diff --git a/go.mod b/go.mod index d6339d50a1..b165226881 100644 --- a/go.mod +++ b/go.mod @@ -81,6 +81,7 @@ require ( github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mattn/go-isatty v0.0.20 // indirect + github.com/outrigdev/goid v0.3.0 // indirect github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect 
github.com/rivo/uniseg v0.4.7 // indirect github.com/sirupsen/logrus v1.9.3 // indirect diff --git a/go.sum b/go.sum index fbc5bc2d2f..e44a38bfdd 100644 --- a/go.sum +++ b/go.sum @@ -146,6 +146,8 @@ github.com/mattn/go-sqlite3 v1.14.32 h1:JD12Ag3oLy1zQA+BNn74xRgaBbdhbNIDYvQUEuuE github.com/mattn/go-sqlite3 v1.14.32/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/outrigdev/goid v0.3.0 h1:t/otQD3EXc45cLtQVPUnNgEyRaTQA4cPeu3qVcrsIws= +github.com/outrigdev/goid v0.3.0/go.mod h1:hEH7f27ypN/GHWt/7gvkRoFYR0LZizfUBIAbak4neVE= github.com/photostorm/pty v1.1.19-0.20230903182454-31354506054b h1:cLGKfKb1uk0hxI0Q8L83UAJPpeJ+gSpn3cCU/tjd3eg= github.com/photostorm/pty v1.1.19-0.20230903182454-31354506054b/go.mod h1:KO+FcPtyLAiRC0hJwreJVvfwc7vnNz77UxBTIGHdPVk= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= diff --git a/pkg/aiusechat/openaicomp/openaicomp-backend.go b/pkg/aiusechat/openaicomp/openaicomp-backend.go index 34d59d097f..188319e370 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-backend.go +++ b/pkg/aiusechat/openaicomp/openaicomp-backend.go @@ -60,7 +60,7 @@ func RunCompletionsChatStep( if !ok { return nil, nil, nil, fmt.Errorf("expected CompletionsChatMessage, got %T", genMsg) } - messages = append(messages, compMsg.Message) + messages = append(messages, *compMsg.Message.clean()) } req, err := buildCompletionsHTTPRequest(ctx, messages, chatOpts) @@ -88,7 +88,7 @@ func RunCompletionsChatStep( } // Stream processing - stopReason, assistantMsg, err := processCompletionsStream(ctx, resp.Body, sseHandler, chatOpts) + stopReason, assistantMsg, err := processCompletionsStream(ctx, resp.Body, sseHandler, chatOpts, cont) if err != nil { return nil, nil, nil, err } @@ -101,6 +101,7 @@ func processCompletionsStream( body io.Reader, sseHandler *sse.SSEHandlerCh, chatOpts uctypes.WaveChatOpts, + cont *uctypes.WaveContinueResponse, ) (*uctypes.WaveStopReason, *CompletionsChatMessage, error) { decoder := eventsource.NewDecoder(body) var textBuilder strings.Builder @@ -110,7 +111,9 @@ func processCompletionsStream( textStarted := false var toolCallsInProgress []ToolCall - _ = sseHandler.AiMsgStart(msgID) + if cont == nil { + _ = sseHandler.AiMsgStart(msgID) + } _ = sseHandler.AiMsgStartStep() for { @@ -246,7 +249,9 @@ func processCompletionsStream( _ = sseHandler.AiMsgTextEnd(textID) } _ = sseHandler.AiMsgFinishStep() - _ = sseHandler.AiMsgFinish(finishReason, nil) + if stopKind != uctypes.StopKindToolUse { + _ = sseHandler.AiMsgFinish(finishReason, nil) + } return stopReason, assistantMsg, nil } From 2d7ce41e86c339caaf93081c31c1efd345492a55 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 12:17:52 -0800 Subject: [PATCH 20/31] move thinkingmodes to config --- frontend/types/gotypes.d.ts | 20 ++++++ pkg/aiusechat/usechat-thinkingmode.go | 87 ++++++++++----------------- pkg/wconfig/defaultconfig/waveai.json | 41 +++++++++++++ pkg/wconfig/settingsconfig.go | 38 +++++++++--- 4 files changed, 121 insertions(+), 65 deletions(-) create mode 100644 pkg/wconfig/defaultconfig/waveai.json diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 1996e0df56..74cb51817b 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -32,6 +32,25 @@ declare global { capabilities?: string[]; }; + // 
wconfig.AIThinkingModeConfigType + type AIThinkingModeConfigType = { + "display:name": string; + "display:order"?: number; + "display:icon": string; + "display:shortdesc"?: string; + "display:description": string; + "ai:apitype": string; + "ai:model": string; + "ai:thinkinglevel": string; + "ai:baseurl"?: string; + "ai:apiversion"?: string; + "ai:apitoken"?: string; + "ai:apitokensecretname"?: string; + "ai:capabilities"?: string[]; + "waveai:cloud"?: boolean; + "waveai:premium": boolean; + }; + // wshrpc.ActivityDisplayType type ActivityDisplayType = { width: number; @@ -769,6 +788,7 @@ declare global { termthemes: {[key: string]: TermThemeType}; connections: {[key: string]: ConnKeywords}; bookmarks: {[key: string]: WebBookmark}; + waveai: {[key: string]: AIThinkingModeConfigType}; configerrors: ConfigError[]; }; diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index b383652efd..0e9ac3d321 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -8,65 +8,39 @@ import ( "sort" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" + "github.com/wavetermdev/waveterm/pkg/wconfig" ) -var thinkingModeConfigs = map[string]uctypes.AIThinkingModeConfig{ - uctypes.ThinkingModeQuick: { - Mode: uctypes.ThinkingModeQuick, - DisplayName: "Quick", - DisplayOrder: -3, - APIType: APIType_OpenAI, - Model: uctypes.DefaultOpenAIModel, - ThinkingLevel: uctypes.ThinkingLevelLow, - WaveAICloud: true, - Premium: false, - DisplayIcon: "bolt", - Description: "Fastest responses (gpt-5-mini)", - Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, - }, - uctypes.ThinkingModeBalanced: { - Mode: uctypes.ThinkingModeBalanced, - DisplayName: "Balanced", - DisplayOrder: -2, - APIType: APIType_OpenAI, - Model: uctypes.PremiumOpenAIModel, - ThinkingLevel: uctypes.ThinkingLevelLow, - WaveAICloud: true, - Premium: true, - DisplayIcon: "sparkles", - Description: "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", - Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, - }, - uctypes.ThinkingModeDeep: { - Mode: uctypes.ThinkingModeDeep, - DisplayName: "Deep", - DisplayOrder: -1, - APIType: APIType_OpenAI, - Model: uctypes.PremiumOpenAIModel, - ThinkingLevel: uctypes.ThinkingLevelMedium, - WaveAICloud: true, - Premium: true, - DisplayIcon: "lightbulb", - Description: "Slower but most capable\n(gpt-5.1 with full reasoning)", - Capabilities: []string{uctypes.AICapabilityTools, uctypes.AICapabilityImages, uctypes.AICapabilityPdfs}, - }, - "openrouter:mistral": { - Mode: "openrouter:mistral", - DisplayName: "Mistral (OpenRouter)", - APIType: APIType_OpenAIComp, - BaseURL: "https://openrouter.ai/api/v1/chat/completions", - Model: "mistralai/mistral-small-3.2-24b-instruct", - ThinkingLevel: uctypes.ThinkingLevelLow, - APITokenSecretName: "OPENROUTER_KEY", - Premium: false, - DisplayIcon: "bolt", - Description: "Fast and capable via OpenRouter\n(Mistral Small 3.2)", - Capabilities: []string{uctypes.AICapabilityTools}, - }, +func getThinkingModeConfigs() map[string]uctypes.AIThinkingModeConfig { + fullConfig := wconfig.GetWatcher().GetFullConfig() + configs := make(map[string]uctypes.AIThinkingModeConfig) + + for mode, cfg := range fullConfig.WaveAIModes { + configs[mode] = uctypes.AIThinkingModeConfig{ + Mode: mode, + DisplayName: cfg.DisplayName, + DisplayOrder: cfg.DisplayOrder, + DisplayIcon: cfg.DisplayIcon, + APIType: cfg.APIType, 
+ Model: cfg.Model, + ThinkingLevel: cfg.ThinkingLevel, + BaseURL: cfg.BaseURL, + WaveAICloud: cfg.WaveAICloud, + APIVersion: cfg.APIVersion, + APIToken: cfg.APIToken, + APITokenSecretName: cfg.APITokenSecretName, + Premium: cfg.WaveAIPremium, + Description: cfg.DisplayDescription, + Capabilities: cfg.Capabilities, + } + } + + return configs } func getThinkingModeConfig(thinkingMode string) (*uctypes.AIThinkingModeConfig, error) { - config, ok := thinkingModeConfigs[thinkingMode] + configs := getThinkingModeConfigs() + config, ok := configs[thinkingMode] if !ok { return nil, fmt.Errorf("invalid thinking mode: %s", thinkingMode) } @@ -76,8 +50,9 @@ func getThinkingModeConfig(thinkingMode string) (*uctypes.AIThinkingModeConfig, } func WaveAIGetModes() ([]uctypes.AIThinkingModeConfig, error) { - modes := make([]uctypes.AIThinkingModeConfig, 0, len(thinkingModeConfigs)) - for _, config := range thinkingModeConfigs { + configs := getThinkingModeConfigs() + modes := make([]uctypes.AIThinkingModeConfig, 0, len(configs)) + for _, config := range configs { modes = append(modes, config) } sort.Slice(modes, func(i, j int) bool { diff --git a/pkg/wconfig/defaultconfig/waveai.json b/pkg/wconfig/defaultconfig/waveai.json new file mode 100644 index 0000000000..b69cc2d041 --- /dev/null +++ b/pkg/wconfig/defaultconfig/waveai.json @@ -0,0 +1,41 @@ +{ + "waveai@quick": { + "display:name": "Quick", + "display:order": -3, + "display:icon": "bolt", + "display:shortdesc": "gpt-5-mini", + "display:description": "Fastest responses (gpt-5-mini)", + "ai:apitype": "openai", + "ai:model": "gpt-5-mini", + "ai:thinkinglevel": "low", + "ai:capabilities": ["tools", "images", "pdfs"], + "waveai:cloud": true, + "waveai:premium": false + }, + "waveai@balanced": { + "display:name": "Balanced", + "display:order": -2, + "display:icon": "sparkles", + "display:shortdesc": "gpt-5.1, low thinking", + "display:description": "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", + "ai:apitype": "openai", + "ai:model": "gpt-5.1", + "ai:thinkinglevel": "low", + "ai:capabilities": ["tools", "images", "pdfs"], + "waveai:cloud": true, + "waveai:premium": true + }, + "waveai@deep": { + "display:name": "Deep", + "display:order": -1, + "display:icon": "lightbulb", + "display:shortdesc": "gpt-5.1, full thinking", + "display:description": "Slower but most capable\n(gpt-5.1 with full reasoning)", + "ai:apitype": "openai", + "ai:model": "gpt-5.1", + "ai:thinkinglevel": "medium", + "ai:capabilities": ["tools", "images", "pdfs"], + "waveai:cloud": true, + "waveai:premium": true + } +} diff --git a/pkg/wconfig/settingsconfig.go b/pkg/wconfig/settingsconfig.go index 4de30cbb6d..b45f9019fd 100644 --- a/pkg/wconfig/settingsconfig.go +++ b/pkg/wconfig/settingsconfig.go @@ -257,17 +257,37 @@ type WebBookmark struct { DisplayOrder float64 `json:"display:order,omitempty"` } +type AIThinkingModeConfigType struct { + DisplayName string `json:"display:name"` + DisplayOrder float64 `json:"display:order,omitempty"` + DisplayIcon string `json:"display:icon"` + DisplayShortDesc string `json:"display:shortdesc,omitempty"` + DisplayDescription string `json:"display:description"` + APIType string `json:"ai:apitype"` + Model string `json:"ai:model"` + ThinkingLevel string `json:"ai:thinkinglevel"` + BaseURL string `json:"ai:baseurl,omitempty"` + APIVersion string `json:"ai:apiversion,omitempty"` + APIToken string `json:"ai:apitoken,omitempty"` + APITokenSecretName string `json:"ai:apitokensecretname,omitempty"` + Capabilities []string 
`json:"ai:capabilities,omitempty"` + WaveAICloud bool `json:"waveai:cloud,omitempty"` + WaveAIPremium bool `json:"waveai:premium"` +} + type FullConfigType struct { - Settings SettingsType `json:"settings" merge:"meta"` - MimeTypes map[string]MimeTypeConfigType `json:"mimetypes"` - DefaultWidgets map[string]WidgetConfigType `json:"defaultwidgets"` - Widgets map[string]WidgetConfigType `json:"widgets"` - Presets map[string]waveobj.MetaMapType `json:"presets"` - TermThemes map[string]TermThemeType `json:"termthemes"` - Connections map[string]ConnKeywords `json:"connections"` - Bookmarks map[string]WebBookmark `json:"bookmarks"` - ConfigErrors []ConfigError `json:"configerrors" configfile:"-"` + Settings SettingsType `json:"settings" merge:"meta"` + MimeTypes map[string]MimeTypeConfigType `json:"mimetypes"` + DefaultWidgets map[string]WidgetConfigType `json:"defaultwidgets"` + Widgets map[string]WidgetConfigType `json:"widgets"` + Presets map[string]waveobj.MetaMapType `json:"presets"` + TermThemes map[string]TermThemeType `json:"termthemes"` + Connections map[string]ConnKeywords `json:"connections"` + Bookmarks map[string]WebBookmark `json:"bookmarks"` + WaveAIModes map[string]AIThinkingModeConfigType `json:"waveai"` + ConfigErrors []ConfigError `json:"configerrors" configfile:"-"` } + type ConnKeywords struct { ConnWshEnabled *bool `json:"conn:wshenabled,omitempty"` ConnAskBeforeWshInstall *bool `json:"conn:askbeforewshinstall,omitempty"` From 13c84b6592238145073a6fbac31a91fb990276a3 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 12:27:51 -0800 Subject: [PATCH 21/31] update consts for new names --- frontend/app/aipanel/aipanel-contextmenu.ts | 14 +++++------ frontend/app/aipanel/thinkingmode.tsx | 27 ++++++++++----------- frontend/app/aipanel/waveai-model.tsx | 4 +-- pkg/aiusechat/uctypes/uctypes.go | 6 ++--- 4 files changed, 25 insertions(+), 26 deletions(-) diff --git a/frontend/app/aipanel/aipanel-contextmenu.ts b/frontend/app/aipanel/aipanel-contextmenu.ts index b7a7f718d4..bd4f7e9f0d 100644 --- a/frontend/app/aipanel/aipanel-contextmenu.ts +++ b/frontend/app/aipanel/aipanel-contextmenu.ts @@ -41,7 +41,7 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom); const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; - const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "balanced" : "quick"); + const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "waveai@balanced" : "waveai@quick"); const defaultTokens = model.inBuilder ? 24576 : 4096; const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens; @@ -49,37 +49,37 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo { label: "Quick (gpt-5-mini)", type: "checkbox", - checked: currentThinkingMode === "quick", + checked: currentThinkingMode === "waveai@quick", click: () => { RpcApi.SetRTInfoCommand(TabRpcClient, { oref: model.orefContext, - data: { "waveai:thinkingmode": "quick" }, + data: { "waveai:thinkingmode": "waveai@quick" }, }); }, }, { label: hasPremium ? 
"Balanced (gpt-5.1, low thinking)" : "Balanced (premium)", type: "checkbox", - checked: currentThinkingMode === "balanced", + checked: currentThinkingMode === "waveai@balanced", enabled: hasPremium, click: () => { if (!hasPremium) return; RpcApi.SetRTInfoCommand(TabRpcClient, { oref: model.orefContext, - data: { "waveai:thinkingmode": "balanced" }, + data: { "waveai:thinkingmode": "waveai@balanced" }, }); }, }, { label: hasPremium ? "Deep (gpt-5.1, full thinking)" : "Deep (premium)", type: "checkbox", - checked: currentThinkingMode === "deep", + checked: currentThinkingMode === "waveai@deep", enabled: hasPremium, click: () => { if (!hasPremium) return; RpcApi.SetRTInfoCommand(TabRpcClient, { oref: model.orefContext, - data: { "waveai:thinkingmode": "deep" }, + data: { "waveai:thinkingmode": "waveai@deep" }, }); }, }, diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/thinkingmode.tsx index 98b2a33115..85d5d90fac 100644 --- a/frontend/app/aipanel/thinkingmode.tsx +++ b/frontend/app/aipanel/thinkingmode.tsx @@ -36,22 +36,21 @@ export const ThinkingLevelDropdown = memo(() => { setIsOpen(false); }; - let currentMode = thinkingMode || "balanced"; + let currentMode = thinkingMode || "waveai@balanced"; const currentConfig = configsMap[currentMode]; - if (!currentConfig) { - return null; - } - if (!hasPremium && currentConfig.premium) { - currentMode = "quick"; - } - if (hideQuick && currentMode === "quick") { - currentMode = "balanced"; + if (currentConfig) { + if (!hasPremium && currentConfig.premium) { + currentMode = "waveai@quick"; + } + if (hideQuick && currentMode === "waveai@quick") { + currentMode = "waveai@balanced"; + } } - const displayConfig = configsMap[currentMode]; - if (!displayConfig) { - return null; - } + const displayConfig = configsMap[currentMode] || { + "display:name": "? Unknown", + "display:icon": "question" + }; return (
@@ -80,7 +79,7 @@ export const ThinkingLevelDropdown = memo(() => { (a["display:order"] || 0) - (b["display:order"] || 0) || (a["display:name"] || "").localeCompare(b["display:name"] || "") ) - .filter((config) => !(hideQuick && config.mode === "quick")) + .filter((config) => !(hideQuick && config.mode === "waveai@quick")) .map((config, index, filteredConfigs) => { const isFirst = index === 0; const isLast = index === filteredConfigs.length - 1; diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx index 0d7c9c873e..e7f16d4d38 100644 --- a/frontend/app/aipanel/waveai-model.tsx +++ b/frontend/app/aipanel/waveai-model.tsx @@ -57,7 +57,7 @@ export class WaveAIModel { widgetAccessAtom!: jotai.Atom; droppedFiles: jotai.PrimitiveAtom = jotai.atom([]); chatId!: jotai.PrimitiveAtom; - thinkingMode: jotai.PrimitiveAtom = jotai.atom("balanced"); + thinkingMode: jotai.PrimitiveAtom = jotai.atom("waveai@balanced"); thinkingModeConfigs: jotai.PrimitiveAtom = jotai.atom([]); errorMessage: jotai.PrimitiveAtom = jotai.atom(null) as jotai.PrimitiveAtom; modelAtom!: jotai.Atom; @@ -360,7 +360,7 @@ export class WaveAIModel { } globalStore.set(this.chatId, chatIdValue); - const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "balanced"; + const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "waveai@balanced"; globalStore.set(this.thinkingMode, thinkingModeValue); try { diff --git a/pkg/aiusechat/uctypes/uctypes.go b/pkg/aiusechat/uctypes/uctypes.go index fc76c576cd..0e2c869c2b 100644 --- a/pkg/aiusechat/uctypes/uctypes.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -139,9 +139,9 @@ const ( ) const ( - ThinkingModeQuick = "quick" - ThinkingModeBalanced = "balanced" - ThinkingModeDeep = "deep" + ThinkingModeQuick = "waveai@quick" + ThinkingModeBalanced = "waveai@balanced" + ThinkingModeDeep = "waveai@deep" ) const ( From a8cf3a97115df5de9e84f1844985c33f777c6738 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 15:35:03 -0800 Subject: [PATCH 22/31] stricter about absolute file paths --- pkg/aiusechat/tools_readdir.go | 2 +- pkg/aiusechat/tools_readfile.go | 2 +- pkg/aiusechat/tools_writefile.go | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/pkg/aiusechat/tools_readdir.go b/pkg/aiusechat/tools_readdir.go index 4b90d664c0..f5d03578fe 100644 --- a/pkg/aiusechat/tools_readdir.go +++ b/pkg/aiusechat/tools_readdir.go @@ -118,7 +118,7 @@ func GetReadDirToolDefinition() uctypes.ToolDefinition { "properties": map[string]any{ "path": map[string]any{ "type": "string", - "description": "Path to the directory to read. Supports '~' for the user's home directory.", + "description": "Absolute path to the directory to read. Supports '~' for the user's home directory. Relative paths are not supported.", }, "max_entries": map[string]any{ "type": "integer", diff --git a/pkg/aiusechat/tools_readfile.go b/pkg/aiusechat/tools_readfile.go index 423333c831..13fa5bc34d 100644 --- a/pkg/aiusechat/tools_readfile.go +++ b/pkg/aiusechat/tools_readfile.go @@ -328,7 +328,7 @@ func GetReadTextFileToolDefinition() uctypes.ToolDefinition { "properties": map[string]any{ "filename": map[string]any{ "type": "string", - "description": "Path to the file to read. Supports '~' for the user's home directory.", + "description": "Absolute path to the file to read. Supports '~' for the user's home directory. 
Relative paths are not supported.", }, "origin": map[string]any{ "type": "string", diff --git a/pkg/aiusechat/tools_writefile.go b/pkg/aiusechat/tools_writefile.go index 2c830fd64c..91af47ef10 100644 --- a/pkg/aiusechat/tools_writefile.go +++ b/pkg/aiusechat/tools_writefile.go @@ -184,7 +184,7 @@ func GetWriteTextFileToolDefinition() uctypes.ToolDefinition { "properties": map[string]any{ "filename": map[string]any{ "type": "string", - "description": "Path to the file to write. Supports '~' for the user's home directory.", + "description": "Absolute path to the file to write. Supports '~' for the user's home directory. Relative paths are not supported.", }, "contents": map[string]any{ "type": "string", @@ -340,7 +340,7 @@ func GetEditTextFileToolDefinition() uctypes.ToolDefinition { "properties": map[string]any{ "filename": map[string]any{ "type": "string", - "description": "Path to the file to edit. Supports '~' for the user's home directory.", + "description": "Absolute path to the file to edit. Supports '~' for the user's home directory. Relative paths are not supported.", }, "edits": map[string]any{ "type": "array", @@ -476,7 +476,7 @@ func GetDeleteTextFileToolDefinition() uctypes.ToolDefinition { "properties": map[string]any{ "filename": map[string]any{ "type": "string", - "description": "Path to the file to delete. Supports '~' for the user's home directory.", + "description": "Absolute path to the file to delete. Supports '~' for the user's home directory. Relative paths are not supported.", }, }, "required": []string{"filename"}, From 6d2b340af01374e947c2dc52b411e244e6f40576 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 16:02:09 -0800 Subject: [PATCH 23/31] update prompt for better tool calling in some models --- .../openaicomp/openaicomp-convertmessage.go | 2 +- pkg/aiusechat/usechat-prompts.go | 62 +++++++++++++++++ pkg/aiusechat/usechat.go | 68 ++++--------------- 3 files changed, 77 insertions(+), 55 deletions(-) create mode 100644 pkg/aiusechat/usechat-prompts.go diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index 5116974ae7..896f88d75a 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -78,7 +78,7 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess if len(chatOpts.SystemPrompt) > 0 { systemMessage := CompletionsMessage{ Role: "system", - Content: strings.Join(chatOpts.SystemPrompt, "\n"), + Content: strings.Join(chatOpts.SystemPrompt, "\n\n"), } finalMessages = append([]CompletionsMessage{systemMessage}, messages...) } diff --git a/pkg/aiusechat/usechat-prompts.go b/pkg/aiusechat/usechat-prompts.go new file mode 100644 index 0000000000..f3638afce9 --- /dev/null +++ b/pkg/aiusechat/usechat-prompts.go @@ -0,0 +1,62 @@ +// Copyright 2025, Command Line Inc. 
+// SPDX-License-Identifier: Apache-2.0
+
+package aiusechat
+
+import "strings"
+
+var SystemPromptText = strings.Join([]string{
+	`You are Wave AI, an intelligent assistant embedded within Wave Terminal, a modern terminal application with graphical widgets.`,
+	`You appear as a pull-out panel on the left side of a tab, with the tab's widgets laid out on the right.`,
+	`Widget context is provided as informational only.`,
+	`Do NOT assume any API access or ability to interact with the widgets except via tools provided (note that some widgets may expose NO tools, so their context is informational only).`,
+}, " ")
+
+var SystemPromptText_OpenAI = strings.Join([]string{
+	`You are Wave AI, an assistant embedded in Wave Terminal (a terminal with graphical widgets).`,
+	`You appear as a pull-out panel on the left; widgets are on the right.`,
+
+	// Capabilities & truthfulness
+	`Tools define your only capabilities. If a capability is not provided by a tool, you cannot do it.`,
+	`Context from widgets is read-only unless a tool explicitly grants interaction.`,
+	`Never fabricate data. If you lack data or access, say so and offer the next best step (e.g., suggest enabling a tool).`,
+
+	// Crisp behavior
+	`Be concise and direct. Prefer determinism over speculation. If a brief clarifying question eliminates guesswork, ask it.`,
+
+	// Attached text files
+	`User-attached text files may appear inline as \ncontent\n.`,
+	`User-attached directories use the tag JSON DirInfo.`,
+	`If multiple attached files exist, treat each as a separate source file with its own file_name.`,
+	`When the user refers to these files, use their inline content directly; do NOT call any read_text_file or file-access tools to re-read them unless asked.`,
+
+	// Output & formatting
+	`When presenting commands or any runnable multi-line code, always use fenced Markdown code blocks.`,
+	`Use an appropriate language hint after the opening fence (e.g., "bash" for shell commands, "go" for Go, "json" for JSON).`,
+	`For shell commands, do NOT prefix lines with "$" or shell prompts. Use placeholders in ALL_CAPS (e.g., PROJECT_ID) and explain them once after the block if needed.`,
+	"Reserve inline code (single backticks) for short references like command names (`grep`, `less`), flags, env vars, file paths, or tiny snippets not meant to be executed.",
+	`You may use Markdown (lists, tables, bold/italics) to improve readability.`,
+	`Never comment on or justify your formatting choices; just follow these rules.`,
+	`When generating code or command blocks, try to keep lines under ~100 characters wide where practical (soft wrap; do not break tokens mid-word). 
Favor indentation and short variable names to stay compact, but correctness always takes priority.`,
+
+	// Safety & limits
+	`If a request would execute dangerous or destructive actions, warn briefly and provide a safer alternative.`,
+	`If output is very long, prefer a brief summary plus a copy-ready fenced block or offer a follow-up chunking strategy.`,
+
+	`You can write and edit local files on disk using available tools, but you cannot execute shell commands, run code in the terminal, or read/write remote files.`,
+	`If the user asks you to execute commands or run code, or deal with remote files, say that these features aren't available yet AND make sure to say that they are coming soon (stay tuned for updates).`,
+	`Instead, show them exactly what command or code they could copy-paste to run manually.`,
+
+	// Final reminder
+	`You have NO API access to widgets or Wave unless provided via an explicit tool.`,
+}, " ")
+
+var SystemPromptText_StrictToolAddOn = `## Tool Call Rules (STRICT)
+
+When you decide a file write/edit tool call is needed:
+
+- Output ONLY the tool call.
+- Do NOT include any explanation, summary, or file content in the chat.
+- Do NOT echo the file content before or after the tool call.
+- After the tool call result is returned, respond ONLY with what the user directly asked for. If they did not ask to see the file content, do NOT show it.
+`
diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go
index e255b86da7..7e623f043a 100644
--- a/pkg/aiusechat/usechat.go
+++ b/pkg/aiusechat/usechat.go
@@ -12,6 +12,7 @@ import (
 	"net/http"
 	"os"
 	"os/user"
+	"regexp"
 	"strings"
 	"sync"
 	"time"
@@ -53,51 +54,18 @@ var (
 	activeChats = ds.MakeSyncMap[bool]() // key is chatid
 )
 
-var SystemPromptText = strings.Join([]string{
-	`You are Wave AI, an intelligent assistant embedded within Wave Terminal, a modern terminal application with graphical widgets.`,
-	`You appear as a pull-out panel on the left side of a tab, with the tab's widgets laid out on the right.`,
-	`Widget context is provided as informationa only.`,
-	`Do NOT assume any API access or ability to interact with the widgets except via tools provided (note that some widgets may expose NO tools, so their context is informational only).`,
-}, " ")
-
-var SystemPromptText_OpenAI = strings.Join([]string{
-	`You are Wave AI, an assistant embedded in Wave Terminal (a terminal with graphical widgets).`,
-	`You appear as a pull-out panel on the left; widgets are on the right.`,
-
-	// Capabilities & truthfulness
-	`Tools define your only capabilities. If a capability is not provided by a tool, you cannot do it.`,
-	`Context from widgets is read-only unless a tool explicitly grants interaction.`,
-	`Never fabricate data. If you lack data or access, say so and offer the next best step (e.g., suggest enabling a tool).`,
-
-	// Crisp behavior
-	`Be concise and direct. Prefer determinism over speculation. 
If a brief clarifying question eliminates guesswork, ask it.`, - - // Attached text files - `User-attached text files may appear inline as \ncontent\n.`, - `User-attached directories use the tag JSON DirInfo.`, - `If multiple attached files exist, treat each as a separate source file with its own file_name.`, - `When the user refers to these files, use their inline content directly; do NOT call any read_text_file or file-access tools to re-read them unless asked.`, - - // Output & formatting - `When presenting commands or any runnable multi-line code, always use fenced Markdown code blocks.`, - `Use an appropriate language hint after the opening fence (e.g., "bash" for shell commands, "go" for Go, "json" for JSON).`, - `For shell commands, do NOT prefix lines with "$" or shell prompts. Use placeholders in ALL_CAPS (e.g., PROJECT_ID) and explain them once after the block if needed.`, - "Reserve inline code (single backticks) for short references like command names (`grep`, `less`), flags, env vars, file paths, or tiny snippets not meant to be executed.", - `You may use Markdown (lists, tables, bold/italics) to improve readability.`, - `Never comment on or justify your formatting choices; just follow these rules.`, - `When generating code or command blocks, try to keep lines under ~100 characters wide where practical (soft wrap; do not break tokens mid-word). Favor indentation and short variable names to stay compact, but correctness always takes priority.`, - - // Safety & limits - `If a request would execute dangerous or destructive actions, warn briefly and provide a safer alternative.`, - `If output is very long, prefer a brief summary plus a copy-ready fenced block or offer a follow-up chunking strategy.`, - - `You can write and edit local files on disk using available tools, but you cannot execute shell commands, run code in the terminal, or read/write remote files.`, - `If the user asks you to execute commands or run code, or deal with remote files say that these features aren't available yet AND make sure to say that they are coming soon (stay tuned for updates).`, - `Instead, show them exactly what command or code they could copy-paste to run manually.`, - - // Final reminder - `You have NO API access to widgets or Wave unless provided via an explicit tool.`, -}, " ") +func getSystemPrompt(apiType string, model string, isBuilder bool) []string { + if isBuilder { + return []string{} + } + basePrompt := SystemPromptText_OpenAI + modelLower := strings.ToLower(model) + needsStrictToolAddOn, _ := regexp.MatchString(`(?i)\b(mistral|o?llama|qwen|mixtral|yi|phi|deepseek)\b`, modelLower) + if needsStrictToolAddOn { + return []string{basePrompt, SystemPromptText_StrictToolAddOn} + } + return []string{basePrompt} +} func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo) (*uctypes.AIOptsType, error) { maxTokens := DefaultMaxTokens @@ -687,15 +655,7 @@ func WaveAIPostMessageHandler(w http.ResponseWriter, r *http.Request) { BuilderId: req.BuilderId, BuilderAppId: req.BuilderAppId, } - if chatOpts.Config.APIType == APIType_OpenAI { - if chatOpts.BuilderId != "" { - chatOpts.SystemPrompt = []string{} - } else { - chatOpts.SystemPrompt = []string{SystemPromptText_OpenAI} - } - } else { - chatOpts.SystemPrompt = []string{SystemPromptText} - } + chatOpts.SystemPrompt = getSystemPrompt(chatOpts.Config.APIType, chatOpts.Config.Model, chatOpts.BuilderId != "") if req.TabId != "" { chatOpts.TabStateGenerator = func() (string, []uctypes.ToolDefinition, string, error) { From 
447d714fa1a41c13f38192d182899c46e59568c4 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 16:17:35 -0800 Subject: [PATCH 24/31] use config obj directly in backend for thinking mode config --- pkg/aiusechat/usechat-thinkingmode.go | 31 ++++++++++++++++++++++----- pkg/aiusechat/usechat.go | 22 ++++++------------- 2 files changed, 33 insertions(+), 20 deletions(-) diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-thinkingmode.go index 0e9ac3d321..ebc1bc2d0c 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-thinkingmode.go @@ -38,15 +38,36 @@ func getThinkingModeConfigs() map[string]uctypes.AIThinkingModeConfig { return configs } -func getThinkingModeConfig(thinkingMode string) (*uctypes.AIThinkingModeConfig, error) { - configs := getThinkingModeConfigs() - config, ok := configs[thinkingMode] +func resolveThinkingMode(requestedMode string, premium bool) (string, *wconfig.AIThinkingModeConfigType, error) { + mode := requestedMode + if mode == "" { + mode = uctypes.ThinkingModeBalanced + } + + config, err := getThinkingModeConfig(mode) + if err != nil { + return "", nil, err + } + + if config.WaveAICloud && !premium { + mode = uctypes.ThinkingModeQuick + config, err = getThinkingModeConfig(mode) + if err != nil { + return "", nil, err + } + } + + return mode, config, nil +} + +func getThinkingModeConfig(thinkingMode string) (*wconfig.AIThinkingModeConfigType, error) { + fullConfig := wconfig.GetWatcher().GetFullConfig() + config, ok := fullConfig.WaveAIModes[thinkingMode] if !ok { return nil, fmt.Errorf("invalid thinking mode: %s", thinkingMode) } - configCopy := config - return &configCopy, nil + return &config, nil } func WaveAIGetModes() ([]uctypes.AIThinkingModeConfig, error) { diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 7e623f043a..c81588b332 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -67,29 +67,18 @@ func getSystemPrompt(apiType string, model string, isBuilder bool) []string { return []string{basePrompt} } -func getWaveAISettings(premium bool, builderMode bool, rtInfo *waveobj.ObjRTInfo) (*uctypes.AIOptsType, error) { +func getWaveAISettings(premium bool, builderMode bool, rtInfo waveobj.ObjRTInfo) (*uctypes.AIOptsType, error) { maxTokens := DefaultMaxTokens if builderMode { maxTokens = BuilderMaxTokens } - if rtInfo != nil && rtInfo.WaveAIMaxOutputTokens > 0 { + if rtInfo.WaveAIMaxOutputTokens > 0 { maxTokens = rtInfo.WaveAIMaxOutputTokens } - var thinkingMode string - if premium { - thinkingMode = uctypes.ThinkingModeBalanced - if rtInfo != nil && rtInfo.WaveAIThinkingMode != "" { - thinkingMode = rtInfo.WaveAIThinkingMode - } - } else { - thinkingMode = uctypes.ThinkingModeQuick - } - - config, err := getThinkingModeConfig(thinkingMode) + thinkingMode, config, err := resolveThinkingMode(rtInfo.WaveAIThinkingMode, premium) if err != nil { return nil, err } - apiToken := config.APIToken if apiToken == "" && config.APITokenSecretName != "" { secret, exists, err := secretstore.GetSecret(config.APITokenSecretName) @@ -627,11 +616,14 @@ func WaveAIPostMessageHandler(w http.ResponseWriter, r *http.Request) { oref := waveobj.MakeORef(waveobj.OType_Builder, req.BuilderId) rtInfo = wstore.GetRTInfo(oref) } + if rtInfo == nil { + rtInfo = &waveobj.ObjRTInfo{} + } // Get WaveAI settings premium := shouldUsePremium() builderMode := req.BuilderId != "" - aiOpts, err := getWaveAISettings(premium, builderMode, rtInfo) + aiOpts, err := getWaveAISettings(premium, builderMode, *rtInfo) 
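+	// rtInfo is non-nil here (an empty ObjRTInfo is substituted above), so the value
+	// copy is safe; empty fields fall back to the balanced default in
+	// resolveThinkingMode and to DefaultMaxTokens/BuilderMaxTokens.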
if err != nil { http.Error(w, fmt.Sprintf("WaveAI configuration error: %v", err), http.StatusInternalServerError) return From d7194e1e7ea0ca1bc3393af1500e43aaf68bfb0a Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 16:48:45 -0800 Subject: [PATCH 25/31] rename thinkingmode => ai mode --- .../aipanel/{thinkingmode.tsx => aimode.tsx} | 20 ++++++------- frontend/app/aipanel/aipanel-contextmenu.ts | 20 ++++++------- frontend/app/aipanel/aipanel.tsx | 4 +-- frontend/app/aipanel/aipanelmessages.tsx | 8 +++--- frontend/app/aipanel/waveai-model.tsx | 22 +++++++-------- frontend/app/store/wshclientapi.ts | 2 +- frontend/types/gotypes.d.ts | 14 +++++----- pkg/aiusechat/uctypes/uctypes.go | 14 +++++----- ...sechat-thinkingmode.go => usechat-mode.go} | 28 +++++++++---------- pkg/aiusechat/usechat.go | 10 +++---- pkg/telemetry/telemetrydata/telemetrydata.go | 2 +- pkg/waveobj/objrtinfo.go | 2 +- pkg/wconfig/settingsconfig.go | 22 +++++++-------- pkg/wshrpc/wshclient/wshclient.go | 4 +-- pkg/wshrpc/wshrpctypes.go | 2 +- pkg/wshrpc/wshserver/wshserver.go | 2 +- 16 files changed, 88 insertions(+), 88 deletions(-) rename frontend/app/aipanel/{thinkingmode.tsx => aimode.tsx} (91%) rename pkg/aiusechat/{usechat-thinkingmode.go => usechat-mode.go} (63%) diff --git a/frontend/app/aipanel/thinkingmode.tsx b/frontend/app/aipanel/aimode.tsx similarity index 91% rename from frontend/app/aipanel/thinkingmode.tsx rename to frontend/app/aipanel/aimode.tsx index 85d5d90fac..a262ef1296 100644 --- a/frontend/app/aipanel/thinkingmode.tsx +++ b/frontend/app/aipanel/aimode.tsx @@ -7,10 +7,10 @@ import { useAtomValue } from "jotai"; import { memo, useRef, useState } from "react"; import { WaveAIModel } from "./waveai-model"; -export const ThinkingLevelDropdown = memo(() => { +export const AIModeDropdown = memo(() => { const model = WaveAIModel.getInstance(); - const thinkingMode = useAtomValue(model.thinkingMode); - const thinkingModeConfigs = useAtomValue(model.thinkingModeConfigs); + const aiMode = useAtomValue(model.currentAIMode); + const aiModeConfigs = useAtomValue(model.aiModeConfigs); const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom); const [isOpen, setIsOpen] = useState(false); const dropdownRef = useRef(null); @@ -18,12 +18,12 @@ export const ThinkingLevelDropdown = memo(() => { const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; const hideQuick = model.inBuilder && hasPremium; - const configsMap = thinkingModeConfigs.reduce( + const configsMap = aiModeConfigs.reduce( (acc, config) => { acc[config.mode] = config; return acc; }, - {} as Record + {} as Record ); const handleSelect = (mode: string) => { @@ -32,11 +32,11 @@ export const ThinkingLevelDropdown = memo(() => { if (!hasPremium && config.premium) { return; } - model.setThinkingMode(mode); + model.setAIMode(mode); setIsOpen(false); }; - let currentMode = thinkingMode || "waveai@balanced"; + let currentMode = aiMode || "waveai@balanced"; const currentConfig = configsMap[currentMode]; if (currentConfig) { if (!hasPremium && currentConfig.premium) { @@ -49,7 +49,7 @@ export const ThinkingLevelDropdown = memo(() => { const displayConfig = configsMap[currentMode] || { "display:name": "? Unknown", - "display:icon": "question" + "display:icon": "question", }; return ( @@ -73,7 +73,7 @@ export const ThinkingLevelDropdown = memo(() => { <>
setIsOpen(false)} />
- {thinkingModeConfigs + {aiModeConfigs .sort( (a, b) => (a["display:order"] || 0) - (b["display:order"] || 0) || @@ -119,4 +119,4 @@ export const ThinkingLevelDropdown = memo(() => { ); }); -ThinkingLevelDropdown.displayName = "ThinkingLevelDropdown"; +AIModeDropdown.displayName = "AIModeDropdown"; diff --git a/frontend/app/aipanel/aipanel-contextmenu.ts b/frontend/app/aipanel/aipanel-contextmenu.ts index bd4f7e9f0d..05060b5e64 100644 --- a/frontend/app/aipanel/aipanel-contextmenu.ts +++ b/frontend/app/aipanel/aipanel-contextmenu.ts @@ -41,45 +41,45 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom); const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; - const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "waveai@balanced" : "waveai@quick"); + const currentAIMode = rtInfo?.["waveai:mode"] ?? (hasPremium ? "waveai@balanced" : "waveai@quick"); const defaultTokens = model.inBuilder ? 24576 : 4096; const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens; - const thinkingModeSubmenu: ContextMenuItem[] = [ + const aiModeSubmenu: ContextMenuItem[] = [ { label: "Quick (gpt-5-mini)", type: "checkbox", - checked: currentThinkingMode === "waveai@quick", + checked: currentAIMode === "waveai@quick", click: () => { RpcApi.SetRTInfoCommand(TabRpcClient, { oref: model.orefContext, - data: { "waveai:thinkingmode": "waveai@quick" }, + data: { "waveai:mode": "waveai@quick" }, }); }, }, { label: hasPremium ? "Balanced (gpt-5.1, low thinking)" : "Balanced (premium)", type: "checkbox", - checked: currentThinkingMode === "waveai@balanced", + checked: currentAIMode === "waveai@balanced", enabled: hasPremium, click: () => { if (!hasPremium) return; RpcApi.SetRTInfoCommand(TabRpcClient, { oref: model.orefContext, - data: { "waveai:thinkingmode": "waveai@balanced" }, + data: { "waveai:mode": "waveai@balanced" }, }); }, }, { label: hasPremium ? 
"Deep (gpt-5.1, full thinking)" : "Deep (premium)", type: "checkbox", - checked: currentThinkingMode === "waveai@deep", + checked: currentAIMode === "waveai@deep", enabled: hasPremium, click: () => { if (!hasPremium) return; RpcApi.SetRTInfoCommand(TabRpcClient, { oref: model.orefContext, - data: { "waveai:thinkingmode": "waveai@deep" }, + data: { "waveai:mode": "waveai@deep" }, }); }, }, @@ -164,8 +164,8 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo } menu.push({ - label: "Thinking Mode", - submenu: thinkingModeSubmenu, + label: "AI Mode", + submenu: aiModeSubmenu, }); menu.push({ diff --git a/frontend/app/aipanel/aipanel.tsx b/frontend/app/aipanel/aipanel.tsx index 9ade5bf513..062fc2f559 100644 --- a/frontend/app/aipanel/aipanel.tsx +++ b/frontend/app/aipanel/aipanel.tsx @@ -16,12 +16,12 @@ import { memo, useCallback, useEffect, useRef, useState } from "react"; import { useDrop } from "react-dnd"; import { formatFileSizeError, isAcceptableFile, validateFileSize } from "./ai-utils"; import { AIDroppedFiles } from "./aidroppedfiles"; +import { AIModeDropdown } from "./aimode"; import { AIPanelHeader } from "./aipanelheader"; import { AIPanelInput } from "./aipanelinput"; import { AIPanelMessages } from "./aipanelmessages"; import { AIRateLimitStrip } from "./airatelimitstrip"; import { TelemetryRequiredMessage } from "./telemetryrequired"; -import { ThinkingLevelDropdown } from "./thinkingmode"; import { WaveAIModel } from "./waveai-model"; const AIBlockMask = memo(() => { @@ -500,7 +500,7 @@ const AIPanelComponentInner = memo(() => { onContextMenu={(e) => handleWaveAIContextMenu(e, true)} >
-                    <ThinkingLevelDropdown />
+                    <AIModeDropdown />
{model.inBuilder ? : }
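The same rename reaches the backend resolver: mode IDs are now namespaced (`waveai@quick`, `waveai@balanced`, `waveai@deep`), so bare legacy IDs stored in rtinfo no longer resolve. A minimal sketch of the intended resolution behavior, assuming the default waveai.json from the earlier patch is loaded; the function names follow the rename applied later in this patch, and `exampleResolveAIMode` is illustrative, not part of the series:

```go
package aiusechat

import "fmt"

// Illustrative only; assumes the default waveai.json modes are loaded via wconfig.
func exampleResolveAIMode() {
	// An empty mode falls back to the balanced default.
	mode, cfg, err := resolveAIMode("", true) // premium user
	fmt.Println(mode, cfg.Model, err) // waveai@balanced gpt-5.1 <nil>

	// Cloud-backed modes downgrade to quick for non-premium users.
	mode, cfg, _ = resolveAIMode("waveai@deep", false)
	fmt.Println(mode, cfg.Model) // waveai@quick gpt-5-mini

	// Pre-rename IDs (e.g. "balanced") no longer resolve and return an error.
	_, _, err = resolveAIMode("balanced", true)
	fmt.Println(err) // invalid AI mode: balanced
}
```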
diff --git a/frontend/app/aipanel/aipanelmessages.tsx b/frontend/app/aipanel/aipanelmessages.tsx index a32e3936b4..3d3ae0d912 100644 --- a/frontend/app/aipanel/aipanelmessages.tsx +++ b/frontend/app/aipanel/aipanelmessages.tsx @@ -4,7 +4,7 @@ import { useAtomValue } from "jotai"; import { memo, useEffect, useRef } from "react"; import { AIMessage } from "./aimessage"; -import { ThinkingLevelDropdown } from "./thinkingmode"; +import { AIModeDropdown } from "./aimode"; import { WaveAIModel } from "./waveai-model"; interface AIPanelMessagesProps { @@ -45,13 +45,13 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane useEffect(() => { const wasStreaming = prevStatusRef.current === "streaming"; const isNowNotStreaming = status !== "streaming"; - + if (wasStreaming && isNowNotStreaming) { requestAnimationFrame(() => { scrollToBottom(); }); } - + prevStatusRef.current = status; }, [status]); @@ -62,7 +62,7 @@ export const AIPanelMessages = memo(({ messages, status, onContextMenu }: AIPane onContextMenu={onContextMenu} >
-                <ThinkingLevelDropdown />
+                <AIModeDropdown />
{messages.map((message, index) => { const isLastMessage = index === messages.length - 1; diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx index e7f16d4d38..21fbeefe8d 100644 --- a/frontend/app/aipanel/waveai-model.tsx +++ b/frontend/app/aipanel/waveai-model.tsx @@ -57,8 +57,8 @@ export class WaveAIModel { widgetAccessAtom!: jotai.Atom; droppedFiles: jotai.PrimitiveAtom = jotai.atom([]); chatId!: jotai.PrimitiveAtom; - thinkingMode: jotai.PrimitiveAtom = jotai.atom("waveai@balanced"); - thinkingModeConfigs: jotai.PrimitiveAtom = jotai.atom([]); + currentAIMode: jotai.PrimitiveAtom = jotai.atom("waveai@balanced"); + aiModeConfigs: jotai.PrimitiveAtom = jotai.atom([]); errorMessage: jotai.PrimitiveAtom = jotai.atom(null) as jotai.PrimitiveAtom; modelAtom!: jotai.Atom; containerWidth: jotai.PrimitiveAtom = jotai.atom(0); @@ -338,11 +338,11 @@ export class WaveAIModel { }); } - setThinkingMode(mode: string) { - globalStore.set(this.thinkingMode, mode); + setAIMode(mode: string) { + globalStore.set(this.currentAIMode, mode); RpcApi.SetRTInfoCommand(TabRpcClient, { oref: this.orefContext, - data: { "waveai:thinkingmode": mode }, + data: { "waveai:mode": mode }, }); } @@ -360,8 +360,8 @@ export class WaveAIModel { } globalStore.set(this.chatId, chatIdValue); - const thinkingModeValue = rtInfo?.["waveai:thinkingmode"] ?? "waveai@balanced"; - globalStore.set(this.thinkingMode, thinkingModeValue); + const aiModeValue = rtInfo?.["waveai:mode"] ?? "waveai@balanced"; + globalStore.set(this.currentAIMode, aiModeValue); try { const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue }); @@ -446,7 +446,7 @@ export class WaveAIModel { async uiLoadInitialChat() { globalStore.set(this.isLoadingChatAtom, true); - await this.loadThinkingModeConfigs(); + await this.loadAIModeConfigs(); const messages = await this.loadInitialChat(); this.useChatSetMessages?.(messages); globalStore.set(this.isLoadingChatAtom, false); @@ -455,14 +455,14 @@ export class WaveAIModel { }, 100); } - async loadThinkingModeConfigs() { + async loadAIModeConfigs() { try { const configs = await RpcApi.WaveAIGetModesCommand(TabRpcClient); if (configs != null && configs.length > 0) { - globalStore.set(this.thinkingModeConfigs, configs); + globalStore.set(this.aiModeConfigs, configs); } } catch (error) { - console.error("Failed to load thinking mode configs:", error); + console.error("Failed to load Wave AI mode configs:", error); } } diff --git a/frontend/app/store/wshclientapi.ts b/frontend/app/store/wshclientapi.ts index 84e47ad0aa..212cc95f50 100644 --- a/frontend/app/store/wshclientapi.ts +++ b/frontend/app/store/wshclientapi.ts @@ -613,7 +613,7 @@ class RpcApiType { } // command "waveaigetmodes" [call] - WaveAIGetModesCommand(client: WshClient, opts?: RpcOpts): Promise { + WaveAIGetModesCommand(client: WshClient, opts?: RpcOpts): Promise { return client.wshRpcCall("waveaigetmodes", null, opts); } diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 74cb51817b..088db39b3b 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -13,8 +13,8 @@ declare global { data64: string; }; - // uctypes.AIThinkingModeConfig - type AIThinkingModeConfig = { + // uctypes.AIModeConfig + type AIModeConfig = { mode: string; "display:name": string; "display:order"?: number; @@ -32,8 +32,8 @@ declare global { capabilities?: string[]; }; - // wconfig.AIThinkingModeConfigType - type AIThinkingModeConfigType = { + // wconfig.AIModeConfigType + type 
AIModeConfigType = { "display:name": string; "display:order"?: number; "display:icon": string; @@ -788,7 +788,7 @@ declare global { termthemes: {[key: string]: TermThemeType}; connections: {[key: string]: ConnKeywords}; bookmarks: {[key: string]: WebBookmark}; - waveai: {[key: string]: AIThinkingModeConfigType}; + waveai: {[key: string]: AIModeConfigType}; configerrors: ConfigError[]; }; @@ -969,7 +969,7 @@ declare global { "builder:appid"?: string; "builder:env"?: {[key: string]: string}; "waveai:chatid"?: string; - "waveai:thinkingmode"?: string; + "waveai:mode"?: string; "waveai:maxoutputtokens"?: number; }; @@ -1279,7 +1279,7 @@ declare global { "waveai:requestdurms"?: number; "waveai:widgetaccess"?: boolean; "waveai:thinkinglevel"?: string; - "waveai:thinkingmode"?: string; + "waveai:mode"?: string; "waveai:feedback"?: "good" | "bad"; "waveai:action"?: string; $set?: TEventUserProps; diff --git a/pkg/aiusechat/uctypes/uctypes.go b/pkg/aiusechat/uctypes/uctypes.go index 0e2c869c2b..5fea0aa43f 100644 --- a/pkg/aiusechat/uctypes/uctypes.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -139,9 +139,9 @@ const ( ) const ( - ThinkingModeQuick = "waveai@quick" - ThinkingModeBalanced = "waveai@balanced" - ThinkingModeDeep = "waveai@deep" + AIModeQuick = "waveai@quick" + AIModeBalanced = "waveai@balanced" + AIModeDeep = "waveai@deep" ) const ( @@ -164,7 +164,7 @@ const ( ApprovalAutoApproved = "auto-approved" ) -type AIThinkingModeConfig struct { +type AIModeConfig struct { Mode string `json:"mode"` DisplayName string `json:"display:name"` DisplayOrder float64 `json:"display:order,omitempty"` @@ -182,7 +182,7 @@ type AIThinkingModeConfig struct { Capabilities []string `json:"capabilities,omitempty"` } -func (c *AIThinkingModeConfig) HasCapability(cap string) bool { +func (c *AIModeConfig) HasCapability(cap string) bool { return slices.Contains(c.Capabilities, cap) } @@ -258,7 +258,7 @@ type AIOptsType struct { MaxTokens int `json:"maxtokens,omitempty"` TimeoutMs int `json:"timeoutms,omitempty"` ThinkingLevel string `json:"thinkinglevel,omitempty"` // ThinkingLevelLow, ThinkingLevelMedium, or ThinkingLevelHigh - ThinkingMode string `json:"thinkingmode,omitempty"` // quick, balanced, or deep + AIMode string `json:"aimode,omitempty"` Capabilities []string `json:"capabilities,omitempty"` } @@ -309,7 +309,7 @@ type AIMetrics struct { RequestDuration int `json:"requestduration"` // ms WidgetAccess bool `json:"widgetaccess"` ThinkingLevel string `json:"thinkinglevel,omitempty"` - ThinkingMode string `json:"thinkingmode,omitempty"` + AIMode string `json:"aimode,omitempty"` } type AIFunctionCallInput struct { diff --git a/pkg/aiusechat/usechat-thinkingmode.go b/pkg/aiusechat/usechat-mode.go similarity index 63% rename from pkg/aiusechat/usechat-thinkingmode.go rename to pkg/aiusechat/usechat-mode.go index ebc1bc2d0c..e2e5b9ec93 100644 --- a/pkg/aiusechat/usechat-thinkingmode.go +++ b/pkg/aiusechat/usechat-mode.go @@ -11,12 +11,12 @@ import ( "github.com/wavetermdev/waveterm/pkg/wconfig" ) -func getThinkingModeConfigs() map[string]uctypes.AIThinkingModeConfig { +func getAIModeConfigs() map[string]uctypes.AIModeConfig { fullConfig := wconfig.GetWatcher().GetFullConfig() - configs := make(map[string]uctypes.AIThinkingModeConfig) + configs := make(map[string]uctypes.AIModeConfig) for mode, cfg := range fullConfig.WaveAIModes { - configs[mode] = uctypes.AIThinkingModeConfig{ + configs[mode] = uctypes.AIModeConfig{ Mode: mode, DisplayName: cfg.DisplayName, DisplayOrder: cfg.DisplayOrder, @@ -38,20 +38,20 @@ func 
getThinkingModeConfigs() map[string]uctypes.AIThinkingModeConfig { return configs } -func resolveThinkingMode(requestedMode string, premium bool) (string, *wconfig.AIThinkingModeConfigType, error) { +func resolveAIMode(requestedMode string, premium bool) (string, *wconfig.AIModeConfigType, error) { mode := requestedMode if mode == "" { - mode = uctypes.ThinkingModeBalanced + mode = uctypes.AIModeBalanced } - config, err := getThinkingModeConfig(mode) + config, err := getAIModeConfig(mode) if err != nil { return "", nil, err } if config.WaveAICloud && !premium { - mode = uctypes.ThinkingModeQuick - config, err = getThinkingModeConfig(mode) + mode = uctypes.AIModeQuick + config, err = getAIModeConfig(mode) if err != nil { return "", nil, err } @@ -60,19 +60,19 @@ func resolveThinkingMode(requestedMode string, premium bool) (string, *wconfig.A return mode, config, nil } -func getThinkingModeConfig(thinkingMode string) (*wconfig.AIThinkingModeConfigType, error) { +func getAIModeConfig(aiMode string) (*wconfig.AIModeConfigType, error) { fullConfig := wconfig.GetWatcher().GetFullConfig() - config, ok := fullConfig.WaveAIModes[thinkingMode] + config, ok := fullConfig.WaveAIModes[aiMode] if !ok { - return nil, fmt.Errorf("invalid thinking mode: %s", thinkingMode) + return nil, fmt.Errorf("invalid AI mode: %s", aiMode) } return &config, nil } -func WaveAIGetModes() ([]uctypes.AIThinkingModeConfig, error) { - configs := getThinkingModeConfigs() - modes := make([]uctypes.AIThinkingModeConfig, 0, len(configs)) +func WaveAIGetModes() ([]uctypes.AIModeConfig, error) { + configs := getAIModeConfigs() + modes := make([]uctypes.AIModeConfig, 0, len(configs)) for _, config := range configs { modes = append(modes, config) } diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index c81588b332..64cb97d052 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -75,7 +75,7 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo waveobj.ObjRTInfo) if rtInfo.WaveAIMaxOutputTokens > 0 { maxTokens = rtInfo.WaveAIMaxOutputTokens } - thinkingMode, config, err := resolveThinkingMode(rtInfo.WaveAIThinkingMode, premium) + aiMode, config, err := resolveAIMode(rtInfo.WaveAIMode, premium) if err != nil { return nil, err } @@ -100,7 +100,7 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo waveobj.ObjRTInfo) } else if config.BaseURL != "" { baseUrl = config.BaseURL } else { - return nil, fmt.Errorf("no BaseURL configured for thinking mode %s", thinkingMode) + return nil, fmt.Errorf("no BaseURL configured for AI mode %s", aiMode) } opts := &uctypes.AIOptsType{ @@ -108,7 +108,7 @@ func getWaveAISettings(premium bool, builderMode bool, rtInfo waveobj.ObjRTInfo) Model: config.Model, MaxTokens: maxTokens, ThinkingLevel: config.ThinkingLevel, - ThinkingMode: thinkingMode, + AIMode: aiMode, BaseURL: baseUrl, Capabilities: config.Capabilities, } @@ -369,7 +369,7 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha WidgetAccess: chatOpts.WidgetAccess, ToolDetail: make(map[string]int), ThinkingLevel: chatOpts.Config.ThinkingLevel, - ThinkingMode: chatOpts.Config.ThinkingMode, + AIMode: chatOpts.Config.AIMode, } firstStep := true var cont *uctypes.WaveContinueResponse @@ -568,7 +568,7 @@ func sendAIMetricsTelemetry(ctx context.Context, metrics *uctypes.AIMetrics) { WaveAIRequestDurMs: metrics.RequestDuration, WaveAIWidgetAccess: metrics.WidgetAccess, WaveAIThinkingLevel: metrics.ThinkingLevel, - WaveAIThinkingMode: metrics.ThinkingMode, + WaveAIMode: 
metrics.AIMode, }) _ = telemetry.RecordTEvent(ctx, event) } diff --git a/pkg/telemetry/telemetrydata/telemetrydata.go b/pkg/telemetry/telemetrydata/telemetrydata.go index 79ec3d6941..7dd7bffdb9 100644 --- a/pkg/telemetry/telemetrydata/telemetrydata.go +++ b/pkg/telemetry/telemetrydata/telemetrydata.go @@ -147,7 +147,7 @@ type TEventProps struct { WaveAIRequestDurMs int `json:"waveai:requestdurms,omitempty"` // ms WaveAIWidgetAccess bool `json:"waveai:widgetaccess,omitempty"` WaveAIThinkingLevel string `json:"waveai:thinkinglevel,omitempty"` - WaveAIThinkingMode string `json:"waveai:thinkingmode,omitempty"` + WaveAIMode string `json:"waveai:mode,omitempty"` WaveAIFeedback string `json:"waveai:feedback,omitempty" tstype:"\"good\" | \"bad\""` WaveAIAction string `json:"waveai:action,omitempty"` diff --git a/pkg/waveobj/objrtinfo.go b/pkg/waveobj/objrtinfo.go index ff88f7090c..77dadf9985 100644 --- a/pkg/waveobj/objrtinfo.go +++ b/pkg/waveobj/objrtinfo.go @@ -22,6 +22,6 @@ type ObjRTInfo struct { BuilderEnv map[string]string `json:"builder:env,omitempty"` WaveAIChatId string `json:"waveai:chatid,omitempty"` - WaveAIThinkingMode string `json:"waveai:thinkingmode,omitempty"` + WaveAIMode string `json:"waveai:mode,omitempty"` WaveAIMaxOutputTokens int `json:"waveai:maxoutputtokens,omitempty"` } diff --git a/pkg/wconfig/settingsconfig.go b/pkg/wconfig/settingsconfig.go index b45f9019fd..abded1f35e 100644 --- a/pkg/wconfig/settingsconfig.go +++ b/pkg/wconfig/settingsconfig.go @@ -257,7 +257,7 @@ type WebBookmark struct { DisplayOrder float64 `json:"display:order,omitempty"` } -type AIThinkingModeConfigType struct { +type AIModeConfigType struct { DisplayName string `json:"display:name"` DisplayOrder float64 `json:"display:order,omitempty"` DisplayIcon string `json:"display:icon"` @@ -276,16 +276,16 @@ type AIThinkingModeConfigType struct { } type FullConfigType struct { - Settings SettingsType `json:"settings" merge:"meta"` - MimeTypes map[string]MimeTypeConfigType `json:"mimetypes"` - DefaultWidgets map[string]WidgetConfigType `json:"defaultwidgets"` - Widgets map[string]WidgetConfigType `json:"widgets"` - Presets map[string]waveobj.MetaMapType `json:"presets"` - TermThemes map[string]TermThemeType `json:"termthemes"` - Connections map[string]ConnKeywords `json:"connections"` - Bookmarks map[string]WebBookmark `json:"bookmarks"` - WaveAIModes map[string]AIThinkingModeConfigType `json:"waveai"` - ConfigErrors []ConfigError `json:"configerrors" configfile:"-"` + Settings SettingsType `json:"settings" merge:"meta"` + MimeTypes map[string]MimeTypeConfigType `json:"mimetypes"` + DefaultWidgets map[string]WidgetConfigType `json:"defaultwidgets"` + Widgets map[string]WidgetConfigType `json:"widgets"` + Presets map[string]waveobj.MetaMapType `json:"presets"` + TermThemes map[string]TermThemeType `json:"termthemes"` + Connections map[string]ConnKeywords `json:"connections"` + Bookmarks map[string]WebBookmark `json:"bookmarks"` + WaveAIModes map[string]AIModeConfigType `json:"waveai"` + ConfigErrors []ConfigError `json:"configerrors" configfile:"-"` } type ConnKeywords struct { diff --git a/pkg/wshrpc/wshclient/wshclient.go b/pkg/wshrpc/wshclient/wshclient.go index 35b2be374b..f2dfc39336 100644 --- a/pkg/wshrpc/wshclient/wshclient.go +++ b/pkg/wshrpc/wshclient/wshclient.go @@ -732,8 +732,8 @@ func WaveAIEnableTelemetryCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) error } // command "waveaigetmodes", wshserver.WaveAIGetModesCommand -func WaveAIGetModesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) 
([]uctypes.AIThinkingModeConfig, error) { - resp, err := sendRpcRequestCallHelper[[]uctypes.AIThinkingModeConfig](w, "waveaigetmodes", nil, opts) +func WaveAIGetModesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]uctypes.AIModeConfig, error) { + resp, err := sendRpcRequestCallHelper[[]uctypes.AIModeConfig](w, "waveaigetmodes", nil, opts) return resp, err } diff --git a/pkg/wshrpc/wshrpctypes.go b/pkg/wshrpc/wshrpctypes.go index 72c081c484..97036a7168 100644 --- a/pkg/wshrpc/wshrpctypes.go +++ b/pkg/wshrpc/wshrpctypes.go @@ -321,7 +321,7 @@ type WshRpcInterface interface { WaveAIToolApproveCommand(ctx context.Context, data CommandWaveAIToolApproveData) error WaveAIAddContextCommand(ctx context.Context, data CommandWaveAIAddContextData) error WaveAIGetToolDiffCommand(ctx context.Context, data CommandWaveAIGetToolDiffData) (*CommandWaveAIGetToolDiffRtnData, error) - WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIThinkingModeConfig, error) + WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIModeConfig, error) // screenshot CaptureBlockScreenshotCommand(ctx context.Context, data CommandCaptureBlockScreenshotData) (string, error) diff --git a/pkg/wshrpc/wshserver/wshserver.go b/pkg/wshrpc/wshserver/wshserver.go index 10e5949a0d..c41f2fbb41 100644 --- a/pkg/wshrpc/wshserver/wshserver.go +++ b/pkg/wshrpc/wshserver/wshserver.go @@ -1262,7 +1262,7 @@ func (ws *WshServer) WaveAIGetToolDiffCommand(ctx context.Context, data wshrpc.C }, nil } -func (ws *WshServer) WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIThinkingModeConfig, error) { +func (ws *WshServer) WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIModeConfig, error) { return aiusechat.WaveAIGetModes() } From df4038869726b2792fbc3f1a0e4b7c943892cdb9 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 17:04:58 -0800 Subject: [PATCH 26/31] use ai mode from config instead of rpc --- frontend/app/aipanel/aimode.tsx | 40 ++++++++++++--------------- frontend/app/aipanel/waveai-model.tsx | 19 ++++--------- frontend/app/store/wshclientapi.ts | 5 ---- frontend/types/gotypes.d.ts | 19 ------------- pkg/aiusechat/usechat-mode.go | 40 --------------------------- pkg/wshrpc/wshclient/wshclient.go | 6 ---- pkg/wshrpc/wshrpctypes.go | 2 -- pkg/wshrpc/wshserver/wshserver.go | 4 --- 8 files changed, 23 insertions(+), 112 deletions(-) diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx index a262ef1296..eec70e41e9 100644 --- a/frontend/app/aipanel/aimode.tsx +++ b/frontend/app/aipanel/aimode.tsx @@ -18,18 +18,19 @@ export const AIModeDropdown = memo(() => { const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0; const hideQuick = model.inBuilder && hasPremium; - const configsMap = aiModeConfigs.reduce( - (acc, config) => { - acc[config.mode] = config; - return acc; - }, - {} as Record - ); + const sortedConfigs = Object.entries(aiModeConfigs) + .map(([mode, config]) => ({ mode, ...config })) + .sort((a, b) => { + const orderDiff = (a["display:order"] || 0) - (b["display:order"] || 0); + if (orderDiff !== 0) return orderDiff; + return (a["display:name"] || "").localeCompare(b["display:name"] || ""); + }) + .filter((config) => !(hideQuick && config.mode === "waveai@quick")); const handleSelect = (mode: string) => { - const config = configsMap[mode]; + const config = aiModeConfigs[mode]; if (!config) return; - if (!hasPremium && config.premium) { + if (!hasPremium && config["waveai:premium"]) { return; } model.setAIMode(mode); @@ -37,9 +38,9 @@ export const 
AIModeDropdown = memo(() => { }; let currentMode = aiMode || "waveai@balanced"; - const currentConfig = configsMap[currentMode]; + const currentConfig = aiModeConfigs[currentMode]; if (currentConfig) { - if (!hasPremium && currentConfig.premium) { + if (!hasPremium && currentConfig["waveai:premium"]) { currentMode = "waveai@quick"; } if (hideQuick && currentMode === "waveai@quick") { @@ -47,7 +48,7 @@ export const AIModeDropdown = memo(() => { } } - const displayConfig = configsMap[currentMode] || { + const displayConfig = aiModeConfigs[currentMode] || { "display:name": "? Unknown", "display:icon": "question", }; @@ -73,17 +74,10 @@ export const AIModeDropdown = memo(() => { <>
setIsOpen(false)} />
- {aiModeConfigs - .sort( - (a, b) => - (a["display:order"] || 0) - (b["display:order"] || 0) || - (a["display:name"] || "").localeCompare(b["display:name"] || "") - ) - .filter((config) => !(hideQuick && config.mode === "waveai@quick")) - .map((config, index, filteredConfigs) => { + {sortedConfigs.map((config, index) => { const isFirst = index === 0; - const isLast = index === filteredConfigs.length - 1; - const isDisabled = !hasPremium && config.premium; + const isLast = index === sortedConfigs.length - 1; + const isDisabled = !hasPremium && config["waveai:premium"]; const isSelected = currentMode === config.mode; return (
- {config.description} + {config["display:description"]}
); diff --git a/frontend/app/aipanel/waveai-model.tsx b/frontend/app/aipanel/waveai-model.tsx index 21fbeefe8d..34e11ec5ce 100644 --- a/frontend/app/aipanel/waveai-model.tsx +++ b/frontend/app/aipanel/waveai-model.tsx @@ -58,7 +58,7 @@ export class WaveAIModel { droppedFiles: jotai.PrimitiveAtom = jotai.atom([]); chatId!: jotai.PrimitiveAtom; currentAIMode: jotai.PrimitiveAtom = jotai.atom("waveai@balanced"); - aiModeConfigs: jotai.PrimitiveAtom = jotai.atom([]); + aiModeConfigs!: jotai.Atom>; errorMessage: jotai.PrimitiveAtom = jotai.atom(null) as jotai.PrimitiveAtom; modelAtom!: jotai.Atom; containerWidth: jotai.PrimitiveAtom = jotai.atom(0); @@ -83,6 +83,11 @@ export class WaveAIModel { const modelMetaAtom = getOrefMetaKeyAtom(this.orefContext, "waveai:model"); return get(modelMetaAtom) ?? "gpt-5.1"; }); + this.aiModeConfigs = jotai.atom((get) => { + const fullConfig = get(atoms.fullConfigAtom); + return fullConfig?.waveai ?? {}; + }); + this.widgetAccessAtom = jotai.atom((get) => { if (this.inBuilder) { @@ -446,7 +451,6 @@ export class WaveAIModel { async uiLoadInitialChat() { globalStore.set(this.isLoadingChatAtom, true); - await this.loadAIModeConfigs(); const messages = await this.loadInitialChat(); this.useChatSetMessages?.(messages); globalStore.set(this.isLoadingChatAtom, false); @@ -455,17 +459,6 @@ export class WaveAIModel { }, 100); } - async loadAIModeConfigs() { - try { - const configs = await RpcApi.WaveAIGetModesCommand(TabRpcClient); - if (configs != null && configs.length > 0) { - globalStore.set(this.aiModeConfigs, configs); - } - } catch (error) { - console.error("Failed to load Wave AI mode configs:", error); - } - } - async ensureRateLimitSet() { const currentInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom); if (currentInfo != null) { diff --git a/frontend/app/store/wshclientapi.ts b/frontend/app/store/wshclientapi.ts index 212cc95f50..0715eae699 100644 --- a/frontend/app/store/wshclientapi.ts +++ b/frontend/app/store/wshclientapi.ts @@ -612,11 +612,6 @@ class RpcApiType { return client.wshRpcCall("waveaienabletelemetry", null, opts); } - // command "waveaigetmodes" [call] - WaveAIGetModesCommand(client: WshClient, opts?: RpcOpts): Promise { - return client.wshRpcCall("waveaigetmodes", null, opts); - } - // command "waveaigettooldiff" [call] WaveAIGetToolDiffCommand(client: WshClient, data: CommandWaveAIGetToolDiffData, opts?: RpcOpts): Promise { return client.wshRpcCall("waveaigettooldiff", data, opts); diff --git a/frontend/types/gotypes.d.ts b/frontend/types/gotypes.d.ts index 088db39b3b..d00e629b4a 100644 --- a/frontend/types/gotypes.d.ts +++ b/frontend/types/gotypes.d.ts @@ -13,25 +13,6 @@ declare global { data64: string; }; - // uctypes.AIModeConfig - type AIModeConfig = { - mode: string; - "display:name": string; - "display:order"?: number; - "display:icon": string; - apitype: string; - model: string; - thinkinglevel: string; - baseurl?: string; - waveaicloud?: boolean; - apiversion?: string; - apitoken?: string; - apitokensecretname?: string; - premium: boolean; - description: string; - capabilities?: string[]; - }; - // wconfig.AIModeConfigType type AIModeConfigType = { "display:name": string; diff --git a/pkg/aiusechat/usechat-mode.go b/pkg/aiusechat/usechat-mode.go index e2e5b9ec93..fe5bd2d786 100644 --- a/pkg/aiusechat/usechat-mode.go +++ b/pkg/aiusechat/usechat-mode.go @@ -5,39 +5,11 @@ package aiusechat import ( "fmt" - "sort" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/wconfig" ) -func 
getAIModeConfigs() map[string]uctypes.AIModeConfig { - fullConfig := wconfig.GetWatcher().GetFullConfig() - configs := make(map[string]uctypes.AIModeConfig) - - for mode, cfg := range fullConfig.WaveAIModes { - configs[mode] = uctypes.AIModeConfig{ - Mode: mode, - DisplayName: cfg.DisplayName, - DisplayOrder: cfg.DisplayOrder, - DisplayIcon: cfg.DisplayIcon, - APIType: cfg.APIType, - Model: cfg.Model, - ThinkingLevel: cfg.ThinkingLevel, - BaseURL: cfg.BaseURL, - WaveAICloud: cfg.WaveAICloud, - APIVersion: cfg.APIVersion, - APIToken: cfg.APIToken, - APITokenSecretName: cfg.APITokenSecretName, - Premium: cfg.WaveAIPremium, - Description: cfg.DisplayDescription, - Capabilities: cfg.Capabilities, - } - } - - return configs -} - func resolveAIMode(requestedMode string, premium bool) (string, *wconfig.AIModeConfigType, error) { mode := requestedMode if mode == "" { @@ -69,15 +41,3 @@ func getAIModeConfig(aiMode string) (*wconfig.AIModeConfigType, error) { return &config, nil } - -func WaveAIGetModes() ([]uctypes.AIModeConfig, error) { - configs := getAIModeConfigs() - modes := make([]uctypes.AIModeConfig, 0, len(configs)) - for _, config := range configs { - modes = append(modes, config) - } - sort.Slice(modes, func(i, j int) bool { - return modes[i].DisplayOrder < modes[j].DisplayOrder - }) - return modes, nil -} diff --git a/pkg/wshrpc/wshclient/wshclient.go b/pkg/wshrpc/wshclient/wshclient.go index f2dfc39336..10f2d17540 100644 --- a/pkg/wshrpc/wshclient/wshclient.go +++ b/pkg/wshrpc/wshclient/wshclient.go @@ -731,12 +731,6 @@ func WaveAIEnableTelemetryCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) error return err } -// command "waveaigetmodes", wshserver.WaveAIGetModesCommand -func WaveAIGetModesCommand(w *wshutil.WshRpc, opts *wshrpc.RpcOpts) ([]uctypes.AIModeConfig, error) { - resp, err := sendRpcRequestCallHelper[[]uctypes.AIModeConfig](w, "waveaigetmodes", nil, opts) - return resp, err -} - // command "waveaigettooldiff", wshserver.WaveAIGetToolDiffCommand func WaveAIGetToolDiffCommand(w *wshutil.WshRpc, data wshrpc.CommandWaveAIGetToolDiffData, opts *wshrpc.RpcOpts) (*wshrpc.CommandWaveAIGetToolDiffRtnData, error) { resp, err := sendRpcRequestCallHelper[*wshrpc.CommandWaveAIGetToolDiffRtnData](w, "waveaigettooldiff", data, opts) diff --git a/pkg/wshrpc/wshrpctypes.go b/pkg/wshrpc/wshrpctypes.go index 97036a7168..0ce53d257f 100644 --- a/pkg/wshrpc/wshrpctypes.go +++ b/pkg/wshrpc/wshrpctypes.go @@ -148,7 +148,6 @@ const ( Command_WaveAIToolApprove = "waveaitoolapprove" Command_WaveAIAddContext = "waveaiaddcontext" Command_WaveAIGetToolDiff = "waveaigettooldiff" - Command_WaveAIGetModes = "waveaigetmodes" Command_CaptureBlockScreenshot = "captureblockscreenshot" @@ -321,7 +320,6 @@ type WshRpcInterface interface { WaveAIToolApproveCommand(ctx context.Context, data CommandWaveAIToolApproveData) error WaveAIAddContextCommand(ctx context.Context, data CommandWaveAIAddContextData) error WaveAIGetToolDiffCommand(ctx context.Context, data CommandWaveAIGetToolDiffData) (*CommandWaveAIGetToolDiffRtnData, error) - WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIModeConfig, error) // screenshot CaptureBlockScreenshotCommand(ctx context.Context, data CommandCaptureBlockScreenshotData) (string, error) diff --git a/pkg/wshrpc/wshserver/wshserver.go b/pkg/wshrpc/wshserver/wshserver.go index c41f2fbb41..6f6c2afc7a 100644 --- a/pkg/wshrpc/wshserver/wshserver.go +++ b/pkg/wshrpc/wshserver/wshserver.go @@ -1262,10 +1262,6 @@ func (ws *WshServer) WaveAIGetToolDiffCommand(ctx context.Context, data 
wshrpc.C }, nil } -func (ws *WshServer) WaveAIGetModesCommand(ctx context.Context) ([]uctypes.AIModeConfig, error) { - return aiusechat.WaveAIGetModes() -} - var wshActivityRe = regexp.MustCompile(`^[a-z:#]+$`) func (ws *WshServer) WshActivityCommand(ctx context.Context, data map[string]int) error { From 2c85b5028bfb43c32ba2c9e900659072d266f3d7 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 17:57:07 -0800 Subject: [PATCH 27/31] fix formatting --- frontend/app/aipanel/aimode.tsx | 62 ++++++++++++++++----------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx index eec70e41e9..cf04851cdb 100644 --- a/frontend/app/aipanel/aimode.tsx +++ b/frontend/app/aipanel/aimode.tsx @@ -75,37 +75,37 @@ export const AIModeDropdown = memo(() => {
setIsOpen(false)} />
{sortedConfigs.map((config, index) => { - const isFirst = index === 0; - const isLast = index === sortedConfigs.length - 1; - const isDisabled = !hasPremium && config["waveai:premium"]; - const isSelected = currentMode === config.mode; - return ( - - ); - })} + const isFirst = index === 0; + const isLast = index === sortedConfigs.length - 1; + const isDisabled = !hasPremium && config["waveai:premium"]; + const isSelected = currentMode === config.mode; + return ( + + ); + })}
)} From f07c7aaf58bbb6c0797214b8bdefe6e2e85d0041 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 20:21:22 -0800 Subject: [PATCH 28/31] remove all hard coded APIType strings (unify to uctypes) --- cmd/testai/main-testai.go | 8 ++++---- pkg/aiusechat/anthropic/anthropic-backend.go | 2 +- pkg/aiusechat/anthropic/anthropic-convertmessage.go | 6 +++--- pkg/aiusechat/openai/openai-backend.go | 2 +- pkg/aiusechat/openai/openai-convertmessage.go | 6 +++--- .../openaicomp/openaicomp-convertmessage.go | 2 +- pkg/aiusechat/openaicomp/openaicomp-types.go | 2 +- pkg/aiusechat/uctypes/uctypes.go | 8 +++++++- pkg/aiusechat/usechat-backend.go | 6 +++--- pkg/aiusechat/usechat.go | 12 +++--------- 10 files changed, 27 insertions(+), 27 deletions(-) diff --git a/cmd/testai/main-testai.go b/cmd/testai/main-testai.go index 226c038ebd..9e33195cfc 100644 --- a/cmd/testai/main-testai.go +++ b/cmd/testai/main-testai.go @@ -114,7 +114,7 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool } opts := &uctypes.AIOptsType{ - APIType: aiusechat.APIType_OpenAI, + APIType: uctypes.APIType_OpenAI, APIToken: apiKey, Model: model, MaxTokens: 4096, @@ -164,7 +164,7 @@ func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes. } opts := &uctypes.AIOptsType{ - APIType: aiusechat.APIType_OpenAIComp, + APIType: uctypes.APIType_OpenAIComp, APIToken: apiKey, BaseURL: "https://api.openai.com/v1/chat/completions", Model: model, @@ -214,7 +214,7 @@ func testOpenRouter(ctx context.Context, model, message string, tools []uctypes. } opts := &uctypes.AIOptsType{ - APIType: aiusechat.APIType_OpenAIComp, + APIType: uctypes.APIType_OpenAIComp, APIToken: apiKey, BaseURL: "https://openrouter.ai/api/v1/chat/completions", Model: model, @@ -264,7 +264,7 @@ func testAnthropic(ctx context.Context, model, message string, tools []uctypes.T } opts := &uctypes.AIOptsType{ - APIType: aiusechat.APIType_Anthropic, + APIType: uctypes.APIType_Anthropic, APIToken: apiKey, Model: model, MaxTokens: 4096, diff --git a/pkg/aiusechat/anthropic/anthropic-backend.go b/pkg/aiusechat/anthropic/anthropic-backend.go index 345d30bcdd..9de484a8f4 100644 --- a/pkg/aiusechat/anthropic/anthropic-backend.go +++ b/pkg/aiusechat/anthropic/anthropic-backend.go @@ -56,7 +56,7 @@ func (m *anthropicChatMessage) GetUsage() *uctypes.AIUsage { } return &uctypes.AIUsage{ - APIType: "anthropic", + APIType: uctypes.APIType_Anthropic, Model: m.Usage.Model, InputTokens: m.Usage.InputTokens, OutputTokens: m.Usage.OutputTokens, diff --git a/pkg/aiusechat/anthropic/anthropic-convertmessage.go b/pkg/aiusechat/anthropic/anthropic-convertmessage.go index 0daf9f99b9..3c7e4fbb75 100644 --- a/pkg/aiusechat/anthropic/anthropic-convertmessage.go +++ b/pkg/aiusechat/anthropic/anthropic-convertmessage.go @@ -171,7 +171,7 @@ func buildAnthropicHTTPRequest(ctx context.Context, msgs []anthropicInputMessage req.Header.Set("anthropic-version", apiVersion) req.Header.Set("accept", "text/event-stream") req.Header.Set("X-Wave-ClientId", chatOpts.ClientId) - req.Header.Set("X-Wave-APIType", "anthropic") + req.Header.Set("X-Wave-APIType", uctypes.APIType_Anthropic) return req, nil } @@ -795,8 +795,8 @@ func ConvertToolResultsToAnthropicChatMessage(toolResults []uctypes.AIToolResult // ConvertAIChatToUIChat converts an AIChat to a UIChat for Anthropic func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { - if aiChat.APIType != "anthropic" { - return nil, fmt.Errorf("APIType must be 'anthropic', got '%s'", aiChat.APIType) + if 
aiChat.APIType != uctypes.APIType_Anthropic { + return nil, fmt.Errorf("APIType must be '%s', got '%s'", uctypes.APIType_Anthropic, aiChat.APIType) } uiMessages := make([]uctypes.UIMessage, 0, len(aiChat.NativeMessages)) diff --git a/pkg/aiusechat/openai/openai-backend.go b/pkg/aiusechat/openai/openai-backend.go index 356384f579..81c0e929f1 100644 --- a/pkg/aiusechat/openai/openai-backend.go +++ b/pkg/aiusechat/openai/openai-backend.go @@ -149,7 +149,7 @@ func (m *OpenAIChatMessage) GetUsage() *uctypes.AIUsage { return nil } return &uctypes.AIUsage{ - APIType: "openai", + APIType: uctypes.APIType_OpenAI, Model: m.Usage.Model, InputTokens: m.Usage.InputTokens, OutputTokens: m.Usage.OutputTokens, diff --git a/pkg/aiusechat/openai/openai-convertmessage.go b/pkg/aiusechat/openai/openai-convertmessage.go index 156c635887..591b67f944 100644 --- a/pkg/aiusechat/openai/openai-convertmessage.go +++ b/pkg/aiusechat/openai/openai-convertmessage.go @@ -299,7 +299,7 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes. req.Header.Set("X-Wave-ChatId", chatOpts.ChatId) } req.Header.Set("X-Wave-Version", wavebase.WaveVersion) - req.Header.Set("X-Wave-APIType", "openai") + req.Header.Set("X-Wave-APIType", uctypes.APIType_OpenAI) req.Header.Set("X-Wave-RequestType", chatOpts.GetWaveRequestType()) return req, nil @@ -519,8 +519,8 @@ func (m *OpenAIChatMessage) convertToUIMessage() *uctypes.UIMessage { // ConvertAIChatToUIChat converts an AIChat to a UIChat for OpenAI func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { - if aiChat.APIType != "openai" { - return nil, fmt.Errorf("APIType must be 'openai', got '%s'", aiChat.APIType) + if aiChat.APIType != uctypes.APIType_OpenAI { + return nil, fmt.Errorf("APIType must be '%s', got '%s'", uctypes.APIType_OpenAI, aiChat.APIType) } uiMessages := make([]uctypes.UIMessage, 0, len(aiChat.NativeMessages)) for i, nativeMsg := range aiChat.NativeMessages { diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index 896f88d75a..130e7fea4d 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -139,7 +139,7 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess req.Header.Set("X-Wave-ChatId", chatOpts.ChatId) } req.Header.Set("X-Wave-Version", wavebase.WaveVersion) - req.Header.Set("X-Wave-APIType", "openai-comp") + req.Header.Set("X-Wave-APIType", uctypes.APIType_OpenAIComp) req.Header.Set("X-Wave-RequestType", chatOpts.GetWaveRequestType()) return req, nil diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaicomp/openaicomp-types.go index 7b8a64445e..390b13158e 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-types.go +++ b/pkg/aiusechat/openaicomp/openaicomp-types.go @@ -141,7 +141,7 @@ func (m *CompletionsChatMessage) GetUsage() *uctypes.AIUsage { return nil } return &uctypes.AIUsage{ - APIType: "openai-comp", + APIType: uctypes.APIType_OpenAIComp, Model: m.Usage.Model, InputTokens: m.Usage.PromptTokens, OutputTokens: m.Usage.CompletionTokens, diff --git a/pkg/aiusechat/uctypes/uctypes.go b/pkg/aiusechat/uctypes/uctypes.go index 5fea0aa43f..a3b4c5a751 100644 --- a/pkg/aiusechat/uctypes/uctypes.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -15,6 +15,12 @@ const DefaultAnthropicModel = "claude-sonnet-4-5" const DefaultOpenAIModel = "gpt-5-mini" const PremiumOpenAIModel = "gpt-5.1" +const ( + APIType_Anthropic = 
"anthropic" + APIType_OpenAI = "openai" + APIType_OpenAIComp = "openai-comp" +) + type UseChatRequest struct { Messages []UIMessage `json:"messages"` } @@ -606,7 +612,7 @@ func AreModelsCompatible(apiType, model1, model2 string) bool { return true } - if apiType == "openai" { + if apiType == APIType_OpenAI { gpt5Models := map[string]bool{ "gpt-5.1": true, "gpt-5": true, diff --git a/pkg/aiusechat/usechat-backend.go b/pkg/aiusechat/usechat-backend.go index 71cbdccd72..c025df76a0 100644 --- a/pkg/aiusechat/usechat-backend.go +++ b/pkg/aiusechat/usechat-backend.go @@ -58,11 +58,11 @@ var _ UseChatBackend = (*anthropicBackend)(nil) // GetBackendByAPIType returns the appropriate UseChatBackend implementation for the given API type func GetBackendByAPIType(apiType string) (UseChatBackend, error) { switch apiType { - case APIType_OpenAI: + case uctypes.APIType_OpenAI: return &openaiResponsesBackend{}, nil - case APIType_OpenAIComp: + case uctypes.APIType_OpenAIComp: return &openaiCompletionsBackend{}, nil - case APIType_Anthropic: + case uctypes.APIType_Anthropic: return &anthropicBackend{}, nil default: return nil, fmt.Errorf("unsupported API type: %s", apiType) diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 64cb97d052..bd8f8228c5 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -35,13 +35,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/wstore" ) -const ( - APIType_Anthropic = "anthropic" - APIType_OpenAI = "openai" - APIType_OpenAIComp = "openai-comp" -) - -const DefaultAPI = APIType_OpenAI +const DefaultAPI = uctypes.APIType_OpenAI const DefaultMaxTokens = 4 * 1024 const BuilderMaxTokens = 24 * 1024 const WaveAIEndpointEnvName = "WAVETERM_WAVEAI_ENDPOINT" @@ -164,7 +158,7 @@ func GetGlobalRateLimit() *uctypes.RateLimitInfo { } func runAIChatStep(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseChatBackend, chatOpts uctypes.WaveChatOpts, cont *uctypes.WaveContinueResponse) (*uctypes.WaveStopReason, []uctypes.GenAIMessage, error) { - if chatOpts.Config.APIType == APIType_OpenAI && shouldUseChatCompletionsAPI(chatOpts.Config.Model) { + if chatOpts.Config.APIType == uctypes.APIType_OpenAI && shouldUseChatCompletionsAPI(chatOpts.Config.Model) { return nil, nil, fmt.Errorf("Chat completions API not available (must use newer OpenAI models)") } stopReason, messages, rateLimitInfo, err := backend.RunChatStep(ctx, sseHandler, chatOpts, cont) @@ -424,7 +418,7 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha } } firstStep = false - if stopReason != nil && stopReason.Kind == uctypes.StopKindPremiumRateLimit && chatOpts.Config.APIType == APIType_OpenAI && chatOpts.Config.Model == uctypes.PremiumOpenAIModel { + if stopReason != nil && stopReason.Kind == uctypes.StopKindPremiumRateLimit && chatOpts.Config.APIType == uctypes.APIType_OpenAI && chatOpts.Config.Model == uctypes.PremiumOpenAIModel { log.Printf("Premium rate limit hit with gpt-5.1, switching to gpt-5-mini\n") cont = &uctypes.WaveContinueResponse{ Model: uctypes.DefaultOpenAIModel, From c94e40758498c02cb6880895d2ab03d7ade2f753 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 21:03:30 -0800 Subject: [PATCH 29/31] update apitype names --- cmd/testai/main-testai.go | 8 ++++---- pkg/aiusechat/anthropic/anthropic-backend.go | 2 +- pkg/aiusechat/anthropic/anthropic-convertmessage.go | 6 +++--- pkg/aiusechat/openai/openai-backend.go | 2 +- pkg/aiusechat/openai/openai-convertmessage.go | 6 +++--- pkg/aiusechat/openaicomp/openaicomp-convertmessage.go 
| 2 +- pkg/aiusechat/openaicomp/openaicomp-types.go | 2 +- pkg/aiusechat/uctypes/uctypes.go | 8 ++++---- pkg/aiusechat/usechat-backend.go | 6 +++--- pkg/aiusechat/usechat-prompts.go | 7 +++---- pkg/aiusechat/usechat.go | 6 +++--- pkg/wconfig/defaultconfig/waveai.json | 6 +++--- 12 files changed, 30 insertions(+), 31 deletions(-) diff --git a/cmd/testai/main-testai.go b/cmd/testai/main-testai.go index 9e33195cfc..8e8fcdb3eb 100644 --- a/cmd/testai/main-testai.go +++ b/cmd/testai/main-testai.go @@ -114,7 +114,7 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool } opts := &uctypes.AIOptsType{ - APIType: uctypes.APIType_OpenAI, + APIType: uctypes.APIType_OpenAIResponses, APIToken: apiKey, Model: model, MaxTokens: 4096, @@ -164,7 +164,7 @@ func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes. } opts := &uctypes.AIOptsType{ - APIType: uctypes.APIType_OpenAIComp, + APIType: uctypes.APIType_OpenAIChat, APIToken: apiKey, BaseURL: "https://api.openai.com/v1/chat/completions", Model: model, @@ -214,7 +214,7 @@ func testOpenRouter(ctx context.Context, model, message string, tools []uctypes. } opts := &uctypes.AIOptsType{ - APIType: uctypes.APIType_OpenAIComp, + APIType: uctypes.APIType_OpenAIChat, APIToken: apiKey, BaseURL: "https://openrouter.ai/api/v1/chat/completions", Model: model, @@ -264,7 +264,7 @@ func testAnthropic(ctx context.Context, model, message string, tools []uctypes.T } opts := &uctypes.AIOptsType{ - APIType: uctypes.APIType_Anthropic, + APIType: uctypes.APIType_AnthropicMessages, APIToken: apiKey, Model: model, MaxTokens: 4096, diff --git a/pkg/aiusechat/anthropic/anthropic-backend.go b/pkg/aiusechat/anthropic/anthropic-backend.go index 9de484a8f4..987b8c117e 100644 --- a/pkg/aiusechat/anthropic/anthropic-backend.go +++ b/pkg/aiusechat/anthropic/anthropic-backend.go @@ -56,7 +56,7 @@ func (m *anthropicChatMessage) GetUsage() *uctypes.AIUsage { } return &uctypes.AIUsage{ - APIType: uctypes.APIType_Anthropic, + APIType: uctypes.APIType_AnthropicMessages, Model: m.Usage.Model, InputTokens: m.Usage.InputTokens, OutputTokens: m.Usage.OutputTokens, diff --git a/pkg/aiusechat/anthropic/anthropic-convertmessage.go b/pkg/aiusechat/anthropic/anthropic-convertmessage.go index 3c7e4fbb75..e8a64f3246 100644 --- a/pkg/aiusechat/anthropic/anthropic-convertmessage.go +++ b/pkg/aiusechat/anthropic/anthropic-convertmessage.go @@ -171,7 +171,7 @@ func buildAnthropicHTTPRequest(ctx context.Context, msgs []anthropicInputMessage req.Header.Set("anthropic-version", apiVersion) req.Header.Set("accept", "text/event-stream") req.Header.Set("X-Wave-ClientId", chatOpts.ClientId) - req.Header.Set("X-Wave-APIType", uctypes.APIType_Anthropic) + req.Header.Set("X-Wave-APIType", uctypes.APIType_AnthropicMessages) return req, nil } @@ -795,8 +795,8 @@ func ConvertToolResultsToAnthropicChatMessage(toolResults []uctypes.AIToolResult // ConvertAIChatToUIChat converts an AIChat to a UIChat for Anthropic func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { - if aiChat.APIType != uctypes.APIType_Anthropic { - return nil, fmt.Errorf("APIType must be '%s', got '%s'", uctypes.APIType_Anthropic, aiChat.APIType) + if aiChat.APIType != uctypes.APIType_AnthropicMessages { + return nil, fmt.Errorf("APIType must be '%s', got '%s'", uctypes.APIType_AnthropicMessages, aiChat.APIType) } uiMessages := make([]uctypes.UIMessage, 0, len(aiChat.NativeMessages)) diff --git a/pkg/aiusechat/openai/openai-backend.go b/pkg/aiusechat/openai/openai-backend.go 
index 81c0e929f1..eb3ac08ee2 100644 --- a/pkg/aiusechat/openai/openai-backend.go +++ b/pkg/aiusechat/openai/openai-backend.go @@ -149,7 +149,7 @@ func (m *OpenAIChatMessage) GetUsage() *uctypes.AIUsage { return nil } return &uctypes.AIUsage{ - APIType: uctypes.APIType_OpenAI, + APIType: uctypes.APIType_OpenAIResponses, Model: m.Usage.Model, InputTokens: m.Usage.InputTokens, OutputTokens: m.Usage.OutputTokens, diff --git a/pkg/aiusechat/openai/openai-convertmessage.go b/pkg/aiusechat/openai/openai-convertmessage.go index 591b67f944..70b6f31aa6 100644 --- a/pkg/aiusechat/openai/openai-convertmessage.go +++ b/pkg/aiusechat/openai/openai-convertmessage.go @@ -299,7 +299,7 @@ func buildOpenAIHTTPRequest(ctx context.Context, inputs []any, chatOpts uctypes. req.Header.Set("X-Wave-ChatId", chatOpts.ChatId) } req.Header.Set("X-Wave-Version", wavebase.WaveVersion) - req.Header.Set("X-Wave-APIType", uctypes.APIType_OpenAI) + req.Header.Set("X-Wave-APIType", uctypes.APIType_OpenAIResponses) req.Header.Set("X-Wave-RequestType", chatOpts.GetWaveRequestType()) return req, nil @@ -519,8 +519,8 @@ func (m *OpenAIChatMessage) convertToUIMessage() *uctypes.UIMessage { // ConvertAIChatToUIChat converts an AIChat to a UIChat for OpenAI func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { - if aiChat.APIType != uctypes.APIType_OpenAI { - return nil, fmt.Errorf("APIType must be '%s', got '%s'", uctypes.APIType_OpenAI, aiChat.APIType) + if aiChat.APIType != uctypes.APIType_OpenAIResponses { + return nil, fmt.Errorf("APIType must be '%s', got '%s'", uctypes.APIType_OpenAIResponses, aiChat.APIType) } uiMessages := make([]uctypes.UIMessage, 0, len(aiChat.NativeMessages)) for i, nativeMsg := range aiChat.NativeMessages { diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go index 130e7fea4d..17df4220d2 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go @@ -139,7 +139,7 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess req.Header.Set("X-Wave-ChatId", chatOpts.ChatId) } req.Header.Set("X-Wave-Version", wavebase.WaveVersion) - req.Header.Set("X-Wave-APIType", uctypes.APIType_OpenAIComp) + req.Header.Set("X-Wave-APIType", uctypes.APIType_OpenAIChat) req.Header.Set("X-Wave-RequestType", chatOpts.GetWaveRequestType()) return req, nil diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaicomp/openaicomp-types.go index 390b13158e..f1ba549653 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-types.go +++ b/pkg/aiusechat/openaicomp/openaicomp-types.go @@ -141,7 +141,7 @@ func (m *CompletionsChatMessage) GetUsage() *uctypes.AIUsage { return nil } return &uctypes.AIUsage{ - APIType: uctypes.APIType_OpenAIComp, + APIType: uctypes.APIType_OpenAIChat, Model: m.Usage.Model, InputTokens: m.Usage.PromptTokens, OutputTokens: m.Usage.CompletionTokens, diff --git a/pkg/aiusechat/uctypes/uctypes.go b/pkg/aiusechat/uctypes/uctypes.go index a3b4c5a751..9fc6a73f53 100644 --- a/pkg/aiusechat/uctypes/uctypes.go +++ b/pkg/aiusechat/uctypes/uctypes.go @@ -16,9 +16,9 @@ const DefaultOpenAIModel = "gpt-5-mini" const PremiumOpenAIModel = "gpt-5.1" const ( - APIType_Anthropic = "anthropic" - APIType_OpenAI = "openai" - APIType_OpenAIComp = "openai-comp" + APIType_AnthropicMessages = "anthropic-messages" + APIType_OpenAIResponses = "openai-responses" + APIType_OpenAIChat = "openai-chat" ) type UseChatRequest struct { @@ -612,7 
+612,7 @@ func AreModelsCompatible(apiType, model1, model2 string) bool {
 		return true
 	}
 
-	if apiType == APIType_OpenAI {
+	if apiType == APIType_OpenAIResponses {
 		gpt5Models := map[string]bool{
 			"gpt-5.1": true,
 			"gpt-5":   true,
diff --git a/pkg/aiusechat/usechat-backend.go b/pkg/aiusechat/usechat-backend.go
index c025df76a0..b0f07d1c2c 100644
--- a/pkg/aiusechat/usechat-backend.go
+++ b/pkg/aiusechat/usechat-backend.go
@@ -58,11 +58,11 @@ var _ UseChatBackend = (*anthropicBackend)(nil)
 // GetBackendByAPIType returns the appropriate UseChatBackend implementation for the given API type
 func GetBackendByAPIType(apiType string) (UseChatBackend, error) {
 	switch apiType {
-	case uctypes.APIType_OpenAI:
+	case uctypes.APIType_OpenAIResponses:
 		return &openaiResponsesBackend{}, nil
-	case uctypes.APIType_OpenAIComp:
+	case uctypes.APIType_OpenAIChat:
 		return &openaiCompletionsBackend{}, nil
-	case uctypes.APIType_Anthropic:
+	case uctypes.APIType_AnthropicMessages:
 		return &anthropicBackend{}, nil
 	default:
 		return nil, fmt.Errorf("unsupported API type: %s", apiType)
diff --git a/pkg/aiusechat/usechat-prompts.go b/pkg/aiusechat/usechat-prompts.go
index f3638afce9..3761995a62 100644
--- a/pkg/aiusechat/usechat-prompts.go
+++ b/pkg/aiusechat/usechat-prompts.go
@@ -17,9 +17,8 @@ var SystemPromptText_OpenAI = strings.Join([]string{
 	`You appear as a pull-out panel on the left; widgets are on the right.`,
 
 	// Capabilities & truthfulness
-	`Tools define your only capabilities. If a capability is not provided by a tool, you cannot do it.`,
-	`Context from widgets is read-only unless a tool explicitly grants interaction.`,
-	`Never fabricate data. If you lack data or access, say so and offer the next best step (e.g., suggest enabling a tool).`,
+	`Tools define your only capabilities. If a capability is not provided by a tool, you cannot do it. Never fabricate data or pretend to call tools. If you lack data or access, say so directly and suggest the next best step.`,
+	`Use read-only tools (capture_screenshot, read_text_file, read_dir, term_get_scrollback) automatically whenever they help answer the user's request. When a user clearly expresses intent to modify something (write/edit/delete files), call the corresponding tool directly.`,
 
 	// Crisp behavior
 	`Be concise and direct. Prefer determinism over speculation. If a brief clarifying question eliminates guesswork, ask it.`,
@@ -53,7 +52,7 @@ var SystemPromptText_OpenAI = strings.Join([]string{
 
 var SystemPromptText_StrictToolAddOn = `## Tool Call Rules (STRICT)
 
-When you decide a file write/edit tool call is needed:
+When you decide a file write/edit tool call is needed:
 - Output ONLY the tool call.
 - Do NOT include any explanation, summary, or file content in the chat.
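
A minimal usage sketch of the renamed constants, showing how a caller wires an OpenAI-compatible endpoint and resolves its backend through GetBackendByAPIType. The PROVIDER_APIKEY env var and the model string are illustrative placeholders (not from this series); the BaseURL and the AIOptsType fields mirror the testOpenRouter helper added in the previous patch.

    package main

    import (
        "fmt"
        "os"

        "github.com/wavetermdev/waveterm/pkg/aiusechat"
        "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes"
    )

    func main() {
        // Sketch only: PROVIDER_APIKEY and the model string are assumed
        // placeholders; BaseURL matches the testOpenRouter helper.
        opts := &uctypes.AIOptsType{
            APIType:   uctypes.APIType_OpenAIChat, // renamed from APIType_OpenAIComp
            APIToken:  os.Getenv("PROVIDER_APIKEY"),
            BaseURL:   "https://openrouter.ai/api/v1/chat/completions",
            Model:     "example/some-model",
            MaxTokens: 4096,
        }
        // Dispatches on the constant: "openai-chat" selects the
        // chat-completions backend.
        backend, err := aiusechat.GetBackendByAPIType(opts.APIType)
        if err != nil {
            fmt.Println(err) // "unsupported API type: ..." for unknown values
            return
        }
        _ = backend // backend.RunChatStep(...) would drive one streaming step
    }

Because all three backends hang off these string constants, an unrecognized ai:apitype in waveai.json fails fast with the "unsupported API type" error instead of silently falling back to a default backend.
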
diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index bd8f8228c5..76895c7209 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -35,7 +35,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/wstore" ) -const DefaultAPI = uctypes.APIType_OpenAI +const DefaultAPI = uctypes.APIType_OpenAIResponses const DefaultMaxTokens = 4 * 1024 const BuilderMaxTokens = 24 * 1024 const WaveAIEndpointEnvName = "WAVETERM_WAVEAI_ENDPOINT" @@ -158,7 +158,7 @@ func GetGlobalRateLimit() *uctypes.RateLimitInfo { } func runAIChatStep(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseChatBackend, chatOpts uctypes.WaveChatOpts, cont *uctypes.WaveContinueResponse) (*uctypes.WaveStopReason, []uctypes.GenAIMessage, error) { - if chatOpts.Config.APIType == uctypes.APIType_OpenAI && shouldUseChatCompletionsAPI(chatOpts.Config.Model) { + if chatOpts.Config.APIType == uctypes.APIType_OpenAIResponses && shouldUseChatCompletionsAPI(chatOpts.Config.Model) { return nil, nil, fmt.Errorf("Chat completions API not available (must use newer OpenAI models)") } stopReason, messages, rateLimitInfo, err := backend.RunChatStep(ctx, sseHandler, chatOpts, cont) @@ -418,7 +418,7 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha } } firstStep = false - if stopReason != nil && stopReason.Kind == uctypes.StopKindPremiumRateLimit && chatOpts.Config.APIType == uctypes.APIType_OpenAI && chatOpts.Config.Model == uctypes.PremiumOpenAIModel { + if stopReason != nil && stopReason.Kind == uctypes.StopKindPremiumRateLimit && chatOpts.Config.APIType == uctypes.APIType_OpenAIResponses && chatOpts.Config.Model == uctypes.PremiumOpenAIModel { log.Printf("Premium rate limit hit with gpt-5.1, switching to gpt-5-mini\n") cont = &uctypes.WaveContinueResponse{ Model: uctypes.DefaultOpenAIModel, diff --git a/pkg/wconfig/defaultconfig/waveai.json b/pkg/wconfig/defaultconfig/waveai.json index b69cc2d041..03e51f3e64 100644 --- a/pkg/wconfig/defaultconfig/waveai.json +++ b/pkg/wconfig/defaultconfig/waveai.json @@ -5,7 +5,7 @@ "display:icon": "bolt", "display:shortdesc": "gpt-5-mini", "display:description": "Fastest responses (gpt-5-mini)", - "ai:apitype": "openai", + "ai:apitype": "openai-responses", "ai:model": "gpt-5-mini", "ai:thinkinglevel": "low", "ai:capabilities": ["tools", "images", "pdfs"], @@ -18,7 +18,7 @@ "display:icon": "sparkles", "display:shortdesc": "gpt-5.1, low thinking", "display:description": "Good mix of speed and accuracy\n(gpt-5.1 with minimal thinking)", - "ai:apitype": "openai", + "ai:apitype": "openai-responses", "ai:model": "gpt-5.1", "ai:thinkinglevel": "low", "ai:capabilities": ["tools", "images", "pdfs"], @@ -31,7 +31,7 @@ "display:icon": "lightbulb", "display:shortdesc": "gpt-5.1, full thinking", "display:description": "Slower but most capable\n(gpt-5.1 with full reasoning)", - "ai:apitype": "openai", + "ai:apitype": "openai-responses", "ai:model": "gpt-5.1", "ai:thinkinglevel": "medium", "ai:capabilities": ["tools", "images", "pdfs"], From 872eee57d2ddb52a4706f3922059f30a663d0127 Mon Sep 17 00:00:00 2001 From: sawka Date: Tue, 25 Nov 2025 21:16:26 -0800 Subject: [PATCH 30/31] big rename. 
openaicomp => openaichat --- .../openaichat-backend.go} | 38 +++++------ .../openaichat-convertmessage.go} | 64 +++++++++---------- .../openaichat-types.go} | 44 ++++++------- pkg/aiusechat/usechat-backend.go | 14 ++-- 4 files changed, 80 insertions(+), 80 deletions(-) rename pkg/aiusechat/{openaicomp/openaicomp-backend.go => openaichat/openaichat-backend.go} (83%) rename pkg/aiusechat/{openaicomp/openaicomp-convertmessage.go => openaichat/openaichat-convertmessage.go} (82%) rename pkg/aiusechat/{openaicomp/openaicomp-types.go => openaichat/openaichat-types.go} (79%) diff --git a/pkg/aiusechat/openaicomp/openaicomp-backend.go b/pkg/aiusechat/openaichat/openaichat-backend.go similarity index 83% rename from pkg/aiusechat/openaicomp/openaicomp-backend.go rename to pkg/aiusechat/openaichat/openaichat-backend.go index 188319e370..4eb6217421 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-backend.go +++ b/pkg/aiusechat/openaichat/openaichat-backend.go @@ -1,7 +1,7 @@ // Copyright 2025, Command Line Inc. // SPDX-License-Identifier: Apache-2.0 -package openaicomp +package openaichat import ( "context" @@ -21,13 +21,13 @@ import ( "github.com/wavetermdev/waveterm/pkg/web/sse" ) -// RunCompletionsChatStep executes a chat step using the completions API -func RunCompletionsChatStep( +// RunChatStep executes a chat step using the chat completions API +func RunChatStep( ctx context.Context, sseHandler *sse.SSEHandlerCh, chatOpts uctypes.WaveChatOpts, cont *uctypes.WaveContinueResponse, -) (*uctypes.WaveStopReason, []*CompletionsChatMessage, *uctypes.RateLimitInfo, error) { +) (*uctypes.WaveStopReason, []*StoredChatMessage, *uctypes.RateLimitInfo, error) { if sseHandler == nil { return nil, nil, nil, errors.New("sse handler is nil") } @@ -43,12 +43,12 @@ func RunCompletionsChatStep( defer cancel() } - // Convert stored messages to completions format - var messages []CompletionsMessage + // Convert stored messages to chat completions format + var messages []ChatRequestMessage // Add system prompt if provided if len(chatOpts.SystemPrompt) > 0 { - messages = append(messages, CompletionsMessage{ + messages = append(messages, ChatRequestMessage{ Role: "system", Content: strings.Join(chatOpts.SystemPrompt, "\n"), }) @@ -56,14 +56,14 @@ func RunCompletionsChatStep( // Convert native messages for _, genMsg := range chat.NativeMessages { - compMsg, ok := genMsg.(*CompletionsChatMessage) + chatMsg, ok := genMsg.(*StoredChatMessage) if !ok { - return nil, nil, nil, fmt.Errorf("expected CompletionsChatMessage, got %T", genMsg) + return nil, nil, nil, fmt.Errorf("expected StoredChatMessage, got %T", genMsg) } - messages = append(messages, *compMsg.Message.clean()) + messages = append(messages, *chatMsg.Message.clean()) } - req, err := buildCompletionsHTTPRequest(ctx, messages, chatOpts) + req, err := buildChatHTTPRequest(ctx, messages, chatOpts) if err != nil { return nil, nil, nil, err } @@ -88,21 +88,21 @@ func RunCompletionsChatStep( } // Stream processing - stopReason, assistantMsg, err := processCompletionsStream(ctx, resp.Body, sseHandler, chatOpts, cont) + stopReason, assistantMsg, err := processChatStream(ctx, resp.Body, sseHandler, chatOpts, cont) if err != nil { return nil, nil, nil, err } - return stopReason, []*CompletionsChatMessage{assistantMsg}, nil, nil + return stopReason, []*StoredChatMessage{assistantMsg}, nil, nil } -func processCompletionsStream( +func processChatStream( ctx context.Context, body io.Reader, sseHandler *sse.SSEHandlerCh, chatOpts uctypes.WaveChatOpts, cont 
*uctypes.WaveContinueResponse, -) (*uctypes.WaveStopReason, *CompletionsChatMessage, error) { +) (*uctypes.WaveStopReason, *StoredChatMessage, error) { decoder := eventsource.NewDecoder(body) var textBuilder strings.Builder msgID := uuid.New().String() @@ -146,7 +146,7 @@ func processCompletionsStream( var chunk StreamChunk if err := json.Unmarshal([]byte(data), &chunk); err != nil { - log.Printf("openaicomp: failed to parse chunk: %v\n", err) + log.Printf("openaichat: failed to parse chunk: %v\n", err) continue } @@ -214,7 +214,7 @@ func processCompletionsStream( var inputJSON any if tc.Function.Arguments != "" { if err := json.Unmarshal([]byte(tc.Function.Arguments), &inputJSON); err != nil { - log.Printf("openaicomp: failed to parse tool call arguments: %v\n", err) + log.Printf("openaichat: failed to parse tool call arguments: %v\n", err) continue } } @@ -232,9 +232,9 @@ func processCompletionsStream( ToolCalls: waveToolCalls, } - assistantMsg := &CompletionsChatMessage{ + assistantMsg := &StoredChatMessage{ MessageId: msgID, - Message: CompletionsMessage{ + Message: ChatRequestMessage{ Role: "assistant", }, } diff --git a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go b/pkg/aiusechat/openaichat/openaichat-convertmessage.go similarity index 82% rename from pkg/aiusechat/openaicomp/openaicomp-convertmessage.go rename to pkg/aiusechat/openaichat/openaichat-convertmessage.go index 17df4220d2..d26c57884f 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-convertmessage.go +++ b/pkg/aiusechat/openaichat/openaichat-convertmessage.go @@ -1,7 +1,7 @@ // Copyright 2025, Command Line Inc. // SPDX-License-Identifier: Apache-2.0 -package openaicomp +package openaichat import ( "bytes" @@ -20,11 +20,11 @@ import ( ) const ( - OpenAICompDefaultMaxTokens = 4096 + OpenAIChatDefaultMaxTokens = 4096 ) // appendToLastUserMessage appends text to the last user message in the messages slice -func appendToLastUserMessage(messages []CompletionsMessage, text string) { +func appendToLastUserMessage(messages []ChatRequestMessage, text string) { for i := len(messages) - 1; i >= 0; i-- { if messages[i].Role == "user" { messages[i].Content += "\n\n" + text @@ -58,8 +58,8 @@ func convertToolDefinitions(waveTools []uctypes.ToolDefinition, capabilities []s return openaiTools } -// buildCompletionsHTTPRequest creates an HTTP request for the OpenAI completions API -func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMessage, chatOpts uctypes.WaveChatOpts) (*http.Request, error) { +// buildChatHTTPRequest creates an HTTP request for the OpenAI chat completions API +func buildChatHTTPRequest(ctx context.Context, messages []ChatRequestMessage, chatOpts uctypes.WaveChatOpts) (*http.Request, error) { opts := chatOpts.Config if opts.Model == "" { @@ -71,16 +71,16 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess maxTokens := opts.MaxTokens if maxTokens <= 0 { - maxTokens = OpenAICompDefaultMaxTokens + maxTokens = OpenAIChatDefaultMaxTokens } finalMessages := messages if len(chatOpts.SystemPrompt) > 0 { - systemMessage := CompletionsMessage{ + systemMessage := ChatRequestMessage{ Role: "system", Content: strings.Join(chatOpts.SystemPrompt, "\n\n"), } - finalMessages = append([]CompletionsMessage{systemMessage}, messages...) + finalMessages = append([]ChatRequestMessage{systemMessage}, messages...) 
} // injected data @@ -91,7 +91,7 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess appendToLastUserMessage(finalMessages, "\n"+chatOpts.PlatformInfo+"\n") } - reqBody := &CompletionsRequest{ + reqBody := &ChatRequest{ Model: opts.Model, Messages: finalMessages, Stream: true, @@ -114,7 +114,7 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess } if wavebase.IsDevMode() { - log.Printf("openaicomp: model %s, messages: %d, tools: %d\n", opts.Model, len(messages), len(allTools)) + log.Printf("openaichat: model %s, messages: %d, tools: %d\n", opts.Model, len(messages), len(allTools)) } buf, err := json.Marshal(reqBody) @@ -145,9 +145,9 @@ func buildCompletionsHTTPRequest(ctx context.Context, messages []CompletionsMess return req, nil } -// ConvertAIMessageToCompletionsMessage converts an AIMessage to CompletionsChatMessage +// ConvertAIMessageToStoredChatMessage converts an AIMessage to StoredChatMessage // These messages are ALWAYS role "user" -func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*CompletionsChatMessage, error) { +func ConvertAIMessageToStoredChatMessage(aiMsg uctypes.AIMessage) (*StoredChatMessage, error) { if err := aiMsg.Validate(); err != nil { return nil, fmt.Errorf("invalid AIMessage: %w", err) } @@ -164,14 +164,14 @@ func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*Completions case part.MimeType == "text/plain": textData, err := aiutil.ExtractTextData(part.Data, part.URL) if err != nil { - log.Printf("openaicomp: error extracting text data for %s: %v\n", part.FileName, err) + log.Printf("openaichat: error extracting text data for %s: %v\n", part.FileName, err) continue } partText = aiutil.FormatAttachedTextFile(part.FileName, textData) case part.MimeType == "directory": if len(part.Data) == 0 { - log.Printf("openaicomp: directory listing part missing data for %s\n", part.FileName) + log.Printf("openaichat: directory listing part missing data for %s\n", part.FileName) continue } partText = aiutil.FormatAttachedDirectoryListing(part.FileName, string(part.Data)) @@ -189,9 +189,9 @@ func ConvertAIMessageToCompletionsMessage(aiMsg uctypes.AIMessage) (*Completions } } - return &CompletionsChatMessage{ + return &StoredChatMessage{ MessageId: aiMsg.MessageId, - Message: CompletionsMessage{ + Message: ChatRequestMessage{ Role: "user", Content: textBuilder.String(), }, @@ -213,9 +213,9 @@ func ConvertToolResultsToNativeChatMessage(toolResults []uctypes.AIToolResult) ( content = toolResult.Text } - msg := &CompletionsChatMessage{ + msg := &StoredChatMessage{ MessageId: toolResult.ToolUseID, - Message: CompletionsMessage{ + Message: ChatRequestMessage{ Role: "tool", ToolCallID: toolResult.ToolUseID, Name: toolResult.ToolName, @@ -239,7 +239,7 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { } for _, genMsg := range aiChat.NativeMessages { - compMsg, ok := genMsg.(*CompletionsChatMessage) + chatMsg, ok := genMsg.(*StoredChatMessage) if !ok { continue } @@ -247,16 +247,16 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { var parts []uctypes.UIMessagePart // Add text content if present - if compMsg.Message.Content != "" { + if chatMsg.Message.Content != "" { parts = append(parts, uctypes.UIMessagePart{ Type: "text", - Text: compMsg.Message.Content, + Text: chatMsg.Message.Content, }) } // Add tool calls if present (assistant requesting tool use) - if len(compMsg.Message.ToolCalls) > 0 { - for _, toolCall := range 
compMsg.Message.ToolCalls { + if len(chatMsg.Message.ToolCalls) > 0 { + for _, toolCall := range chatMsg.Message.ToolCalls { if toolCall.Type != "function" { continue } @@ -273,7 +273,7 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { } // Tool result messages (role "tool") are not converted to UIMessage - if compMsg.Message.Role == "tool" && compMsg.Message.ToolCallID != "" { + if chatMsg.Message.Role == "tool" && chatMsg.Message.ToolCallID != "" { continue } @@ -283,8 +283,8 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { } uiMsg := uctypes.UIMessage{ - ID: compMsg.MessageId, - Role: compMsg.Message.Role, + ID: chatMsg.MessageId, + Role: chatMsg.Message.Role, Parts: parts, } @@ -297,15 +297,15 @@ func ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { // GetFunctionCallInputByToolCallId searches for a tool call by ID in the chat history func GetFunctionCallInputByToolCallId(aiChat uctypes.AIChat, toolCallId string) *uctypes.AIFunctionCallInput { for _, genMsg := range aiChat.NativeMessages { - compMsg, ok := genMsg.(*CompletionsChatMessage) + chatMsg, ok := genMsg.(*StoredChatMessage) if !ok { continue } - idx := compMsg.Message.FindToolCallIndex(toolCallId) + idx := chatMsg.Message.FindToolCallIndex(toolCallId) if idx == -1 { continue } - toolCall := compMsg.Message.ToolCalls[idx] + toolCall := chatMsg.Message.ToolCalls[idx] return &uctypes.AIFunctionCallInput{ CallId: toolCall.ID, Name: toolCall.Function.Name, @@ -324,15 +324,15 @@ func UpdateToolUseData(chatId string, callId string, newToolUseData uctypes.UIMe } for _, genMsg := range chat.NativeMessages { - compMsg, ok := genMsg.(*CompletionsChatMessage) + chatMsg, ok := genMsg.(*StoredChatMessage) if !ok { continue } - idx := compMsg.Message.FindToolCallIndex(callId) + idx := chatMsg.Message.FindToolCallIndex(callId) if idx == -1 { continue } - updatedMsg := compMsg.Copy() + updatedMsg := chatMsg.Copy() updatedMsg.Message.ToolCalls[idx].ToolUseData = &newToolUseData aiOpts := &uctypes.AIOptsType{ APIType: chat.APIType, diff --git a/pkg/aiusechat/openaicomp/openaicomp-types.go b/pkg/aiusechat/openaichat/openaichat-types.go similarity index 79% rename from pkg/aiusechat/openaicomp/openaicomp-types.go rename to pkg/aiusechat/openaichat/openaichat-types.go index f1ba549653..2475aee059 100644 --- a/pkg/aiusechat/openaicomp/openaicomp-types.go +++ b/pkg/aiusechat/openaichat/openaichat-types.go @@ -1,17 +1,17 @@ // Copyright 2025, Command Line Inc. 
// SPDX-License-Identifier: Apache-2.0 -package openaicomp +package openaichat import ( "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" ) -// OpenAI Completions API types (simplified) +// OpenAI Chat Completions API types (simplified) -type CompletionsRequest struct { +type ChatRequest struct { Model string `json:"model"` - Messages []CompletionsMessage `json:"messages"` + Messages []ChatRequestMessage `json:"messages"` Stream bool `json:"stream"` MaxTokens int `json:"max_tokens,omitempty"` // legacy MaxCompletionTokens int `json:"max_completion_tokens,omitempty"` // newer @@ -20,7 +20,7 @@ type CompletionsRequest struct { ToolChoice any `json:"tool_choice,omitempty"` // "auto", "none", or struct } -type CompletionsMessage struct { +type ChatRequestMessage struct { Role string `json:"role"` // "system","user","assistant","tool" Content string `json:"content,omitempty"` // normal text messages ToolCalls []ToolCall `json:"tool_calls,omitempty"` // assistant tool-call message @@ -28,7 +28,7 @@ type CompletionsMessage struct { Name string `json:"name,omitempty"` // tool name on role:"tool" } -func (cm *CompletionsMessage) clean() *CompletionsMessage { +func (cm *ChatRequestMessage) clean() *ChatRequestMessage { if len(cm.ToolCalls) == 0 { return cm } @@ -40,7 +40,7 @@ func (cm *CompletionsMessage) clean() *CompletionsMessage { return &rtn } -func (cm *CompletionsMessage) FindToolCallIndex(toolCallId string) int { +func (cm *ChatRequestMessage) FindToolCallIndex(toolCallId string) int { for i, tc := range cm.ToolCalls { if tc.ID == toolCallId { return i @@ -114,41 +114,41 @@ type ToolFunctionDelta struct { Arguments string `json:"arguments,omitempty"` // streamed, append across chunks } -// CompletionsChatMessage is the stored message type -type CompletionsChatMessage struct { +// StoredChatMessage is the stored message type +type StoredChatMessage struct { MessageId string `json:"messageid"` - Message CompletionsMessage `json:"message"` - Usage *CompletionsUsage `json:"usage,omitempty"` + Message ChatRequestMessage `json:"message"` + Usage *ChatUsage `json:"usage,omitempty"` } -type CompletionsUsage struct { - Model string `json:"model,omitempty"` - PromptTokens int `json:"prompt_tokens,omitempty"` - CompletionTokens int `json:"completion_tokens,omitempty"` - TotalTokens int `json:"total_tokens,omitempty"` +type ChatUsage struct { + Model string `json:"model,omitempty"` + InputTokens int `json:"prompt_tokens,omitempty"` + OutputTokens int `json:"completion_tokens,omitempty"` + TotalTokens int `json:"total_tokens,omitempty"` } -func (m *CompletionsChatMessage) GetMessageId() string { +func (m *StoredChatMessage) GetMessageId() string { return m.MessageId } -func (m *CompletionsChatMessage) GetRole() string { +func (m *StoredChatMessage) GetRole() string { return m.Message.Role } -func (m *CompletionsChatMessage) GetUsage() *uctypes.AIUsage { +func (m *StoredChatMessage) GetUsage() *uctypes.AIUsage { if m.Usage == nil { return nil } return &uctypes.AIUsage{ APIType: uctypes.APIType_OpenAIChat, Model: m.Usage.Model, - InputTokens: m.Usage.PromptTokens, - OutputTokens: m.Usage.CompletionTokens, + InputTokens: m.Usage.InputTokens, + OutputTokens: m.Usage.OutputTokens, } } -func (m *CompletionsChatMessage) Copy() *CompletionsChatMessage { +func (m *StoredChatMessage) Copy() *StoredChatMessage { if m == nil { return nil } diff --git a/pkg/aiusechat/usechat-backend.go b/pkg/aiusechat/usechat-backend.go index b0f07d1c2c..528cd3af5c 100644 --- a/pkg/aiusechat/usechat-backend.go +++ 
b/pkg/aiusechat/usechat-backend.go @@ -9,7 +9,7 @@ import ( "github.com/wavetermdev/waveterm/pkg/aiusechat/anthropic" "github.com/wavetermdev/waveterm/pkg/aiusechat/openai" - "github.com/wavetermdev/waveterm/pkg/aiusechat/openaicomp" + "github.com/wavetermdev/waveterm/pkg/aiusechat/openaichat" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/web/sse" ) @@ -132,7 +132,7 @@ func (b *openaiCompletionsBackend) RunChatStep( chatOpts uctypes.WaveChatOpts, cont *uctypes.WaveContinueResponse, ) (*uctypes.WaveStopReason, []uctypes.GenAIMessage, *uctypes.RateLimitInfo, error) { - stopReason, msgs, rateLimitInfo, err := openaicomp.RunCompletionsChatStep(ctx, sseHandler, chatOpts, cont) + stopReason, msgs, rateLimitInfo, err := openaichat.RunChatStep(ctx, sseHandler, chatOpts, cont) var genMsgs []uctypes.GenAIMessage for _, msg := range msgs { genMsgs = append(genMsgs, msg) @@ -141,23 +141,23 @@ func (b *openaiCompletionsBackend) RunChatStep( } func (b *openaiCompletionsBackend) UpdateToolUseData(chatId string, toolCallId string, toolUseData uctypes.UIMessageDataToolUse) error { - return openaicomp.UpdateToolUseData(chatId, toolCallId, toolUseData) + return openaichat.UpdateToolUseData(chatId, toolCallId, toolUseData) } func (b *openaiCompletionsBackend) ConvertToolResultsToNativeChatMessage(toolResults []uctypes.AIToolResult) ([]uctypes.GenAIMessage, error) { - return openaicomp.ConvertToolResultsToNativeChatMessage(toolResults) + return openaichat.ConvertToolResultsToNativeChatMessage(toolResults) } func (b *openaiCompletionsBackend) ConvertAIMessageToNativeChatMessage(message uctypes.AIMessage) (uctypes.GenAIMessage, error) { - return openaicomp.ConvertAIMessageToCompletionsMessage(message) + return openaichat.ConvertAIMessageToStoredChatMessage(message) } func (b *openaiCompletionsBackend) GetFunctionCallInputByToolCallId(aiChat uctypes.AIChat, toolCallId string) *uctypes.AIFunctionCallInput { - return openaicomp.GetFunctionCallInputByToolCallId(aiChat, toolCallId) + return openaichat.GetFunctionCallInputByToolCallId(aiChat, toolCallId) } func (b *openaiCompletionsBackend) ConvertAIChatToUIChat(aiChat uctypes.AIChat) (*uctypes.UIChat, error) { - return openaicomp.ConvertAIChatToUIChat(aiChat) + return openaichat.ConvertAIChatToUIChat(aiChat) } // anthropicBackend implements UseChatBackend for Anthropic API From 83feb929812cf2fa73d3c17b331d1b7439815a7f Mon Sep 17 00:00:00 2001 From: sawka Date: Wed, 26 Nov 2025 11:25:25 -0800 Subject: [PATCH 31/31] fixing nits --- frontend/app/aipanel/aimode.tsx | 2 +- pkg/aiusechat/openaichat/openaichat-types.go | 12 ++++----- pkg/aiusechat/tools_readdir.go | 14 ++++++++++ pkg/aiusechat/tools_readfile.go | 8 ++++++ pkg/aiusechat/tools_writefile.go | 28 ++++++++++++++++++++ pkg/aiusechat/usechat-prompts.go | 4 +-- pkg/aiusechat/usechat.go | 2 +- pkg/wconfig/settingsconfig.go | 2 +- 8 files changed, 61 insertions(+), 11 deletions(-) diff --git a/frontend/app/aipanel/aimode.tsx b/frontend/app/aipanel/aimode.tsx index cf04851cdb..d5ec9d3063 100644 --- a/frontend/app/aipanel/aimode.tsx +++ b/frontend/app/aipanel/aimode.tsx @@ -61,7 +61,7 @@ export const AIModeDropdown = memo(() => { "group flex items-center gap-1.5 px-2 py-1 text-xs text-gray-300 hover:text-white rounded transition-colors cursor-pointer border border-gray-600/50", isOpen ? 
"bg-gray-700" : "bg-gray-800/50 hover:bg-gray-700" )} - title={`Thinking: ${displayConfig["display:name"]}`} + title={`AI Mode: ${displayConfig["display:name"]}`} > diff --git a/pkg/aiusechat/openaichat/openaichat-types.go b/pkg/aiusechat/openaichat/openaichat-types.go index 2475aee059..f0bcc41614 100644 --- a/pkg/aiusechat/openaichat/openaichat-types.go +++ b/pkg/aiusechat/openaichat/openaichat-types.go @@ -152,20 +152,20 @@ func (m *StoredChatMessage) Copy() *StoredChatMessage { if m == nil { return nil } - copy := *m + copied := *m if len(m.Message.ToolCalls) > 0 { - copy.Message.ToolCalls = make([]ToolCall, len(m.Message.ToolCalls)) + copied.Message.ToolCalls = make([]ToolCall, len(m.Message.ToolCalls)) for i, tc := range m.Message.ToolCalls { - copy.Message.ToolCalls[i] = tc + copied.Message.ToolCalls[i] = tc if tc.ToolUseData != nil { toolUseDataCopy := *tc.ToolUseData - copy.Message.ToolCalls[i].ToolUseData = &toolUseDataCopy + copied.Message.ToolCalls[i].ToolUseData = &toolUseDataCopy } } } if m.Usage != nil { usageCopy := *m.Usage - copy.Usage = &usageCopy + copied.Usage = &usageCopy } - return © + return &copied } diff --git a/pkg/aiusechat/tools_readdir.go b/pkg/aiusechat/tools_readdir.go index f5d03578fe..da7d568f84 100644 --- a/pkg/aiusechat/tools_readdir.go +++ b/pkg/aiusechat/tools_readdir.go @@ -6,6 +6,7 @@ package aiusechat import ( "fmt" "os" + "path/filepath" "github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes" "github.com/wavetermdev/waveterm/pkg/util/fileutil" @@ -63,6 +64,10 @@ func verifyReadDirInput(input any, toolUseData *uctypes.UIMessageDataToolUse) er return fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return fmt.Errorf("path must be absolute, got relative path: %s", params.Path) + } + fileInfo, err := os.Stat(expandedPath) if err != nil { return fmt.Errorf("failed to stat path: %w", err) @@ -81,6 +86,15 @@ func readDirCallback(input any, toolUseData *uctypes.UIMessageDataToolUse) (any, return nil, err } + expandedPath, err := wavebase.ExpandHomeDir(params.Path) + if err != nil { + return nil, fmt.Errorf("failed to expand path: %w", err) + } + + if !filepath.IsAbs(expandedPath) { + return nil, fmt.Errorf("path must be absolute, got relative path: %s", params.Path) + } + result, err := fileutil.ReadDir(params.Path, *params.MaxEntries) if err != nil { return nil, err diff --git a/pkg/aiusechat/tools_readfile.go b/pkg/aiusechat/tools_readfile.go index 13fa5bc34d..eecc2385b6 100644 --- a/pkg/aiusechat/tools_readfile.go +++ b/pkg/aiusechat/tools_readfile.go @@ -208,6 +208,10 @@ func verifyReadTextFileInput(input any, toolUseData *uctypes.UIMessageDataToolUs return fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + if blocked, reason := isBlockedFile(expandedPath); blocked { return fmt.Errorf("access denied: potentially sensitive file: %s", reason) } @@ -237,6 +241,10 @@ func readTextFileCallback(input any, toolUseData *uctypes.UIMessageDataToolUse) return nil, fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return nil, fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + if blocked, reason := isBlockedFile(expandedPath); blocked { return nil, fmt.Errorf("access denied: potentially sensitive file: %s", reason) } diff --git a/pkg/aiusechat/tools_writefile.go b/pkg/aiusechat/tools_writefile.go index 91af47ef10..d554cfab09 100644 --- 
a/pkg/aiusechat/tools_writefile.go +++ b/pkg/aiusechat/tools_writefile.go @@ -112,6 +112,10 @@ func verifyWriteTextFileInput(input any, toolUseData *uctypes.UIMessageDataToolU return fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + contentsBytes := []byte(params.Contents) if utilfn.HasBinaryData(contentsBytes) { return fmt.Errorf("contents appear to contain binary data") @@ -137,6 +141,10 @@ func writeTextFileCallback(input any, toolUseData *uctypes.UIMessageDataToolUse) return nil, fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return nil, fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + contentsBytes := []byte(params.Contents) if utilfn.HasBinaryData(contentsBytes) { return nil, fmt.Errorf("contents appear to contain binary data") @@ -247,6 +255,10 @@ func verifyEditTextFileInput(input any, toolUseData *uctypes.UIMessageDataToolUs return fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + _, err = validateTextFile(expandedPath, "edit", true) if err != nil { return err @@ -269,6 +281,10 @@ func EditTextFileDryRun(input any, fileOverride string) ([]byte, []byte, error) return nil, nil, fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return nil, nil, fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + _, err = validateTextFile(expandedPath, "edit", true) if err != nil { return nil, nil, err @@ -303,6 +319,10 @@ func editTextFileCallback(input any, toolUseData *uctypes.UIMessageDataToolUse) return nil, fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return nil, fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + _, err = validateTextFile(expandedPath, "edit", true) if err != nil { return nil, err @@ -422,6 +442,10 @@ func verifyDeleteTextFileInput(input any, toolUseData *uctypes.UIMessageDataTool return fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + _, err = validateTextFile(expandedPath, "delete", true) if err != nil { return err @@ -442,6 +466,10 @@ func deleteTextFileCallback(input any, toolUseData *uctypes.UIMessageDataToolUse return nil, fmt.Errorf("failed to expand path: %w", err) } + if !filepath.IsAbs(expandedPath) { + return nil, fmt.Errorf("path must be absolute, got relative path: %s", params.Filename) + } + _, err = validateTextFile(expandedPath, "delete", true) if err != nil { return nil, err diff --git a/pkg/aiusechat/usechat-prompts.go b/pkg/aiusechat/usechat-prompts.go index 3761995a62..b8bcb7aa03 100644 --- a/pkg/aiusechat/usechat-prompts.go +++ b/pkg/aiusechat/usechat-prompts.go @@ -8,7 +8,7 @@ import "strings" var SystemPromptText = strings.Join([]string{ `You are Wave AI, an intelligent assistant embedded within Wave Terminal, a modern terminal application with graphical widgets.`, `You appear as a pull-out panel on the left side of a tab, with the tab's widgets laid out on the right.`, - `Widget context is provided as informationa only.`, + `Widget context is provided as informational only.`, `Do NOT assume any API access or ability to interact with the widgets except via tools provided (note that 
some widgets may expose NO tools, so their context is informational only).`, }, " ") @@ -52,7 +52,7 @@ var SystemPromptText_OpenAI = strings.Join([]string{ var SystemPromptText_StrictToolAddOn = `## Tool Call Rules (STRICT) -When you decide a file write/edit tool call is needed:' +When you decide a file write/edit tool call is needed: - Output ONLY the tool call. - Do NOT include any explanation, summary, or file content in the chat. diff --git a/pkg/aiusechat/usechat.go b/pkg/aiusechat/usechat.go index 76895c7209..477b0001c4 100644 --- a/pkg/aiusechat/usechat.go +++ b/pkg/aiusechat/usechat.go @@ -419,7 +419,7 @@ func RunAIChat(ctx context.Context, sseHandler *sse.SSEHandlerCh, backend UseCha } firstStep = false if stopReason != nil && stopReason.Kind == uctypes.StopKindPremiumRateLimit && chatOpts.Config.APIType == uctypes.APIType_OpenAIResponses && chatOpts.Config.Model == uctypes.PremiumOpenAIModel { - log.Printf("Premium rate limit hit with gpt-5.1, switching to gpt-5-mini\n") + log.Printf("Premium rate limit hit with %s, switching to %s\n", uctypes.PremiumOpenAIModel, uctypes.DefaultOpenAIModel) cont = &uctypes.WaveContinueResponse{ Model: uctypes.DefaultOpenAIModel, ContinueFromKind: uctypes.StopKindPremiumRateLimit, diff --git a/pkg/wconfig/settingsconfig.go b/pkg/wconfig/settingsconfig.go index abded1f35e..c493cf49d5 100644 --- a/pkg/wconfig/settingsconfig.go +++ b/pkg/wconfig/settingsconfig.go @@ -272,7 +272,7 @@ type AIModeConfigType struct { APITokenSecretName string `json:"ai:apitokensecretname,omitempty"` Capabilities []string `json:"ai:capabilities,omitempty"` WaveAICloud bool `json:"waveai:cloud,omitempty"` - WaveAIPremium bool `json:"waveai:premium"` + WaveAIPremium bool `json:"waveai:premium,omitempty"` } type FullConfigType struct {
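
Editor's note: PATCH 31/31 repeats the same expand-then-require-absolute sequence at eight call sites (readdir, readfile, and the four writefile handlers). A follow-up could hoist it into one shared helper; the sketch below is one possible shape, not part of the patch. The helper name resolveAbsPath is hypothetical, and the pkg/wavebase import path and ExpandHomeDir behavior are assumed to match the hunks above.

    // Hypothetical helper consolidating the repeated validation above.
    // Not part of this patch series; names and import path are assumptions.
    package aiusechat

    import (
    	"fmt"
    	"path/filepath"

    	"github.com/wavetermdev/waveterm/pkg/wavebase"
    )

    // resolveAbsPath expands a leading "~" and rejects paths that are still
    // relative after expansion, returning the expanded absolute path.
    func resolveAbsPath(path string) (string, error) {
    	expandedPath, err := wavebase.ExpandHomeDir(path)
    	if err != nil {
    		return "", fmt.Errorf("failed to expand path: %w", err)
    	}
    	if !filepath.IsAbs(expandedPath) {
    		return "", fmt.Errorf("path must be absolute, got relative path: %s", path)
    	}
    	return expandedPath, nil
    }

With that in place, each verify/callback pair would collapse to a single expandedPath, err := resolveAbsPath(params.Filename) call instead of the duplicated four-line block.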
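Editor's note: the copy -> copied rename in openaichat-types.go is not purely cosmetic: the local variable shadowed Go's builtin copy function for the rest of the method body. A minimal illustration of the deep-copy contract the method provides (values invented for the example; it assumes the exported types shown in the openaichat-types.go hunks above):

    // Illustrative only: mutating the copy must not affect the original,
    // because Copy() clones the ToolCalls slice and its ToolUseData pointers.
    orig := &openaichat.StoredChatMessage{
    	MessageId: "msg-1",
    	Message: openaichat.ChatRequestMessage{
    		Role:      "assistant",
    		ToolCalls: []openaichat.ToolCall{{ID: "call-1"}},
    	},
    }
    dup := orig.Copy()
    dup.Message.ToolCalls[0].ID = "call-2"
    fmt.Println(orig.Message.ToolCalls[0].ID) // "call-1": the slice is not shared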
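Editor's note: adding omitempty to waveai:premium in settingsconfig.go means a false value is now dropped from marshaled config rather than written out explicitly, matching the other optional fields in the struct. Roughly:

    // Sketch of the standard encoding/json behavior this change relies on.
    type demo struct {
    	Premium bool `json:"waveai:premium,omitempty"`
    }
    b, _ := json.Marshal(demo{Premium: false})
    fmt.Println(string(b)) // {} — the zero value is omitted
    b, _ = json.Marshal(demo{Premium: true})
    fmt.Println(string(b)) // {"waveai:premium":true}

Note that with omitempty an explicit false and an unset field serialize identically; a consumer that needs to tell them apart would want a *bool instead.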