Merged

32 commits
cca274b
first cut at a text-only backend for the /v1/chat/completions api
sawka Nov 24, 2025
0e4a872
test with mistral on openrouter
sawka Nov 24, 2025
2df1efc
text file parts and add tab state and platforminfo...
sawka Nov 24, 2025
56e19fd
move thinking mode to static initialization
sawka Nov 24, 2025
bf7324e
backend usechat thinking modes
sawka Nov 24, 2025
c81e0c6
pull thinkingmodes from the backend
sawka Nov 25, 2025
27e1ec5
capabilities... in thinkingmodeconfig...
sawka Nov 25, 2025
c6f2f72
use makeiconclass
sawka Nov 25, 2025
12344e3
rename usechat-types.go (for AI). change display fields to have more…
sawka Nov 25, 2025
091cb0c
remove unused const + format
sawka Nov 25, 2025
3399a6e
add tools, check capabilities
sawka Nov 25, 2025
5b786c7
convert tool parts
sawka Nov 25, 2025
a65a132
working on tools
sawka Nov 25, 2025
09fd9bb
convert to role "tool" messages
sawka Nov 25, 2025
2644d9b
fix so a return message has EITHER tool calls OR content
sawka Nov 25, 2025
597a67b
implement two tool functions in the backend interface
sawka Nov 25, 2025
f11cc8d
make sendToolProgress and createToolUseData generic
sawka Nov 25, 2025
b2862e3
move toolusedata to the usechat pkg and out of the backends
sawka Nov 25, 2025
a3bb27c
Merge remote-tracking branch 'origin/main' into sawka/waveai-openai-c…
sawka Nov 25, 2025
2d1567c
get tools working
sawka Nov 25, 2025
2d7ce41
move thinkingmodes to config
sawka Nov 25, 2025
13c84b6
update consts for new names
sawka Nov 25, 2025
a8cf3a9
stricter about absolute file paths
sawka Nov 25, 2025
6d2b340
update prompt for better tool calling in some models
sawka Nov 26, 2025
447d714
use config obj directly in backend for thinking mode config
sawka Nov 26, 2025
d7194e1
rename thinkingmode => ai mode
sawka Nov 26, 2025
df40388
use ai mode from config instead of rpc
sawka Nov 26, 2025
2c85b50
fix formatting
sawka Nov 26, 2025
f07c7aa
remove all hard coded APIType strings (unify to uctypes)
sawka Nov 26, 2025
c94e407
update apitype names
sawka Nov 26, 2025
872eee5
big rename. openaicomp => openaichat
sawka Nov 26, 2025
83feb92
fixing nits
sawka Nov 26, 2025
140 changes: 133 additions & 7 deletions cmd/testai/main-testai.go
@@ -24,8 +24,9 @@ import (
var testSchemaJSON string

const (
DefaultAnthropicModel = "claude-sonnet-4-5"
DefaultOpenAIModel = "gpt-5.1"
DefaultAnthropicModel = "claude-sonnet-4-5"
DefaultOpenAIModel = "gpt-5.1"
DefaultOpenRouterModel = "mistralai/mistral-small-3.2-24b-instruct"
)

// TestResponseWriter implements http.ResponseWriter and additional interfaces for testing
@@ -113,7 +114,7 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool
}

opts := &uctypes.AIOptsType{
APIType: aiusechat.APIType_OpenAI,
APIType: uctypes.APIType_OpenAIResponses,
APIToken: apiKey,
Model: model,
MaxTokens: 4096,
@@ -155,6 +156,106 @@ func testOpenAI(ctx context.Context, model, message string, tools []uctypes.Tool
}
}

func testOpenAIComp(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) {
apiKey := os.Getenv("OPENAI_APIKEY")
if apiKey == "" {
fmt.Println("Error: OPENAI_APIKEY environment variable not set")
os.Exit(1)
}

opts := &uctypes.AIOptsType{
APIType: uctypes.APIType_OpenAIChat,
APIToken: apiKey,
BaseURL: "https://api.openai.com/v1/chat/completions",
Model: model,
MaxTokens: 4096,
ThinkingLevel: uctypes.ThinkingLevelMedium,
}

chatID := uuid.New().String()

aiMessage := &uctypes.AIMessage{
MessageId: uuid.New().String(),
Parts: []uctypes.AIMessagePart{
{
Type: uctypes.AIMessagePartTypeText,
Text: message,
},
},
}

fmt.Printf("Testing OpenAI Completions API with WaveAIPostMessageWrap, model: %s\n", model)
fmt.Printf("Message: %s\n", message)
fmt.Printf("Chat ID: %s\n", chatID)
fmt.Println("---")

testWriter := &TestResponseWriter{}
sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx)
defer sseHandler.Close()

chatOpts := uctypes.WaveChatOpts{
ChatId: chatID,
ClientId: uuid.New().String(),
Config: *opts,
Tools: tools,
SystemPrompt: []string{"You are a helpful assistant. Be concise and clear in your responses."},
}
err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts)
if err != nil {
fmt.Printf("OpenAI Completions API streaming error: %v\n", err)
}
}

func testOpenRouter(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) {
apiKey := os.Getenv("OPENROUTER_APIKEY")
if apiKey == "" {
fmt.Println("Error: OPENROUTER_APIKEY environment variable not set")
os.Exit(1)
}

opts := &uctypes.AIOptsType{
APIType: uctypes.APIType_OpenAIChat,
APIToken: apiKey,
BaseURL: "https://openrouter.ai/api/v1/chat/completions",
Model: model,
MaxTokens: 4096,
ThinkingLevel: uctypes.ThinkingLevelMedium,
}

chatID := uuid.New().String()

aiMessage := &uctypes.AIMessage{
MessageId: uuid.New().String(),
Parts: []uctypes.AIMessagePart{
{
Type: uctypes.AIMessagePartTypeText,
Text: message,
},
},
}

fmt.Printf("Testing OpenRouter with WaveAIPostMessageWrap, model: %s\n", model)
fmt.Printf("Message: %s\n", message)
fmt.Printf("Chat ID: %s\n", chatID)
fmt.Println("---")

testWriter := &TestResponseWriter{}
sseHandler := sse.MakeSSEHandlerCh(testWriter, ctx)
defer sseHandler.Close()

chatOpts := uctypes.WaveChatOpts{
ChatId: chatID,
ClientId: uuid.New().String(),
Config: *opts,
Tools: tools,
SystemPrompt: []string{"You are a helpful assistant. Be concise and clear in your responses."},
}
err := aiusechat.WaveAIPostMessageWrap(ctx, sseHandler, aiMessage, chatOpts)
if err != nil {
fmt.Printf("OpenRouter streaming error: %v\n", err)
}
}

func testAnthropic(ctx context.Context, model, message string, tools []uctypes.ToolDefinition) {
apiKey := os.Getenv("ANTHROPIC_APIKEY")
if apiKey == "" {
@@ -163,7 +264,7 @@ func testAnthropic(ctx context.Context, model, message string, tools []uctypes.T
}

opts := &uctypes.AIOptsType{
APIType: aiusechat.APIType_Anthropic,
APIType: uctypes.APIType_AnthropicMessages,
APIToken: apiKey,
Model: model,
MaxTokens: 4096,
@@ -217,33 +318,46 @@ func testT2(ctx context.Context) {
testOpenAI(ctx, DefaultOpenAIModel, "what is 2+2+8, use the provider adder tool", tools)
}

func testT3(ctx context.Context) {
testOpenAIComp(ctx, "gpt-4o", "what is 2+2? please be brief", nil)
}

func printUsage() {
fmt.Println("Usage: go run main-testai.go [--anthropic] [--tools] [--model <model>] [message]")
fmt.Println("Usage: go run main-testai.go [--anthropic|--openaicomp|--openrouter] [--tools] [--model <model>] [message]")
fmt.Println("Examples:")
fmt.Println(" go run main-testai.go 'What is 2+2?'")
fmt.Println(" go run main-testai.go --model o4-mini 'What is 2+2?'")
fmt.Println(" go run main-testai.go --anthropic 'What is 2+2?'")
fmt.Println(" go run main-testai.go --anthropic --model claude-3-5-sonnet-20241022 'What is 2+2?'")
fmt.Println(" go run main-testai.go --openaicomp --model gpt-4o 'What is 2+2?'")
fmt.Println(" go run main-testai.go --openrouter 'What is 2+2?'")
fmt.Println(" go run main-testai.go --openrouter --model anthropic/claude-3.5-sonnet 'What is 2+2?'")
fmt.Println(" go run main-testai.go --tools 'Help me configure GitHub Actions monitoring'")
fmt.Println("")
fmt.Println("Default models:")
fmt.Printf(" OpenAI: %s\n", DefaultOpenAIModel)
fmt.Printf(" Anthropic: %s\n", DefaultAnthropicModel)
fmt.Printf(" OpenAI Completions: gpt-4o\n")
fmt.Printf(" OpenRouter: %s\n", DefaultOpenRouterModel)
fmt.Println("")
fmt.Println("Environment variables:")
fmt.Println(" OPENAI_APIKEY (for OpenAI models)")
fmt.Println(" ANTHROPIC_APIKEY (for Anthropic models)")
fmt.Println(" OPENROUTER_APIKEY (for OpenRouter models)")
}

func main() {
var anthropic, tools, help, t1, t2 bool
var anthropic, openaicomp, openrouter, tools, help, t1, t2, t3 bool
var model string
flag.BoolVar(&anthropic, "anthropic", false, "Use Anthropic API instead of OpenAI")
flag.BoolVar(&openaicomp, "openaicomp", false, "Use OpenAI Completions API")
flag.BoolVar(&openrouter, "openrouter", false, "Use OpenRouter API")
flag.BoolVar(&tools, "tools", false, "Enable GitHub Actions Monitor tools for testing")
flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic)", DefaultOpenAIModel, DefaultAnthropicModel))
flag.StringVar(&model, "model", "", fmt.Sprintf("AI model to use (defaults: %s for OpenAI, %s for Anthropic, %s for OpenRouter)", DefaultOpenAIModel, DefaultAnthropicModel, DefaultOpenRouterModel))
flag.BoolVar(&help, "help", false, "Show usage information")
flag.BoolVar(&t1, "t1", false, fmt.Sprintf("Run preset T1 test (%s with 'what is 2+2')", DefaultAnthropicModel))
flag.BoolVar(&t2, "t2", false, fmt.Sprintf("Run preset T2 test (%s with 'what is 2+2')", DefaultOpenAIModel))
flag.BoolVar(&t3, "t3", false, "Run preset T3 test (OpenAI Completions API with gpt-4o)")
flag.Parse()

if help {
@@ -262,11 +376,19 @@ func main() {
testT2(ctx)
return
}
if t3 {
testT3(ctx)
return
}

// Set default model based on API type if not provided
if model == "" {
if anthropic {
model = DefaultAnthropicModel
} else if openaicomp {
model = "gpt-4o"
} else if openrouter {
model = DefaultOpenRouterModel
} else {
model = DefaultOpenAIModel
}
Expand All @@ -285,6 +407,10 @@ func main() {

if anthropic {
testAnthropic(ctx, model, message, toolDefs)
} else if openaicomp {
testOpenAIComp(ctx, model, message, toolDefs)
} else if openrouter {
testOpenRouter(ctx, model, message, toolDefs)
} else {
testOpenAI(ctx, model, message, toolDefs)
}
116 changes: 116 additions & 0 deletions frontend/app/aipanel/aimode.tsx
@@ -0,0 +1,116 @@
// Copyright 2025, Command Line Inc.
// SPDX-License-Identifier: Apache-2.0

import { atoms } from "@/app/store/global";
import { cn, makeIconClass } from "@/util/util";
import { useAtomValue } from "jotai";
import { memo, useRef, useState } from "react";
import { WaveAIModel } from "./waveai-model";

export const AIModeDropdown = memo(() => {
const model = WaveAIModel.getInstance();
const aiMode = useAtomValue(model.currentAIMode);
const aiModeConfigs = useAtomValue(model.aiModeConfigs);
const rateLimitInfo = useAtomValue(atoms.waveAIRateLimitInfoAtom);
const [isOpen, setIsOpen] = useState(false);
const dropdownRef = useRef<HTMLDivElement>(null);

const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
const hideQuick = model.inBuilder && hasPremium;

const sortedConfigs = Object.entries(aiModeConfigs)
.map(([mode, config]) => ({ mode, ...config }))
.sort((a, b) => {
const orderDiff = (a["display:order"] || 0) - (b["display:order"] || 0);
if (orderDiff !== 0) return orderDiff;
return (a["display:name"] || "").localeCompare(b["display:name"] || "");
})
.filter((config) => !(hideQuick && config.mode === "waveai@quick"));

const handleSelect = (mode: string) => {
const config = aiModeConfigs[mode];
if (!config) return;
if (!hasPremium && config["waveai:premium"]) {
return;
}
model.setAIMode(mode);
setIsOpen(false);
};
Comment on lines +30 to +38
⚠️ Potential issue | 🟡 Minor

Consider adding user feedback when premium mode selection is blocked.

When a user without premium access attempts to select a premium mode, the function silently returns (lines 33-35) without any notification or feedback. Consider adding a toast message or alert to inform users why the mode cannot be selected (e.g., "Premium subscription required for this mode").

Example implementation:

 const handleSelect = (mode: string) => {
     const config = aiModeConfigs[mode];
     if (!config) return;
     if (!hasPremium && config["waveai:premium"]) {
+        // Show toast or notification: "Premium subscription required"
         return;
     }
     model.setAIMode(mode);
     setIsOpen(false);
 };

Committable suggestion skipped: line range outside the PR's diff.

🤖 Prompt for AI Agents
In frontend/app/aipanel/aimode.tsx around lines 30 to 38, the handler silently
returns when a non-premium user selects a premium mode; update handleSelect so
that instead of silently returning it triggers a user-facing notification
(toast/alert) like "Premium subscription required for this mode" and then
returns, leaving the UI state unchanged; use the project's existing
notification/toast utility or a simple alert if none exists, ensure the message
is clear, do not close the modal or call model.setAIMode when blocked, and keep
the early return after emitting the notification.
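
A minimal sketch of that feedback path in plain TypeScript (assumption: showToast stands in for whatever notification utility the codebase actually provides; it is not confirmed by this PR):

const handleSelect = (mode: string) => {
    const config = aiModeConfigs[mode];
    if (!config) return;
    if (!hasPremium && config["waveai:premium"]) {
        // Hypothetical helper; swap in the project's real toast/notification utility.
        showToast("Premium subscription required for this mode");
        // Keep the early return so UI state stays unchanged and the dropdown stays open.
        return;
    }
    model.setAIMode(mode);
    setIsOpen(false);
};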


let currentMode = aiMode || "waveai@balanced";
const currentConfig = aiModeConfigs[currentMode];
if (currentConfig) {
if (!hasPremium && currentConfig["waveai:premium"]) {
currentMode = "waveai@quick";
}
if (hideQuick && currentMode === "waveai@quick") {
currentMode = "waveai@balanced";
}
}
Comment on lines +40 to +49
⚠️ Potential issue | 🟡 Minor

Premium fallback logic may cause UI inconsistency.

The currentMode normalization (lines 40-49) silently overrides the user's selected mode based on premium status without updating the persisted state. This could cause confusion where the UI shows one mode but the atom holds a different value.

Consider calling model.setAIMode(currentMode) when a fallback is applied, or ensure the backend handles this normalization so the frontend stays in sync.

🤖 Prompt for AI Agents
In frontend/app/aipanel/aimode.tsx around lines 40 to 49, currentMode is being
silently overridden when the user lacks premium or hideQuick constraints are
applied, causing the UI to display a mode different from the persisted atom;
update the persisted state when a fallback occurs by calling
model.setAIMode(currentMode) (or the appropriate setter) immediately after you
change currentMode so the atom/backend stays in sync, and ensure this call is
only made when the normalized mode differs from the incoming aiMode to avoid
unnecessary writes.
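
A minimal sketch of that sync-on-fallback idea (assumptions: the component re-renders when the atom changes, model.setAIMode persists the mode, and useEffect is imported from react — none of which this PR's diff confirms):

// After computing the normalized currentMode above, persist any fallback so the
// atom and the UI agree. Guarded so the write only happens when the value changed.
useEffect(() => {
    if (currentMode !== aiMode) {
        model.setAIMode(currentMode);
    }
}, [currentMode, aiMode]);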


const displayConfig = aiModeConfigs[currentMode] || {
"display:name": "? Unknown",
"display:icon": "question",
};

return (
<div className="relative" ref={dropdownRef}>
<button
onClick={() => setIsOpen(!isOpen)}
className={cn(
"group flex items-center gap-1.5 px-2 py-1 text-xs text-gray-300 hover:text-white rounded transition-colors cursor-pointer border border-gray-600/50",
isOpen ? "bg-gray-700" : "bg-gray-800/50 hover:bg-gray-700"
)}
title={`AI Mode: ${displayConfig["display:name"]}`}
>
<i className={cn(makeIconClass(displayConfig["display:icon"], false), "text-[10px]")}></i>
<span className={`text-[11px] ${isOpen ? "inline" : "hidden group-hover:inline @w450:inline"}`}>
{displayConfig["display:name"]}
</span>
<i className="fa fa-chevron-down text-[8px]"></i>
</button>

{isOpen && (
<>
<div className="fixed inset-0 z-40" onClick={() => setIsOpen(false)} />
<div className="absolute top-full left-0 mt-1 bg-gray-800 border border-gray-600 rounded shadow-lg z-50 min-w-[280px]">
{sortedConfigs.map((config, index) => {
const isFirst = index === 0;
const isLast = index === sortedConfigs.length - 1;
const isDisabled = !hasPremium && config["waveai:premium"];
const isSelected = currentMode === config.mode;
return (
<button
key={config.mode}
onClick={() => handleSelect(config.mode)}
disabled={isDisabled}
className={`w-full flex flex-col gap-0.5 px-3 ${
isFirst ? "pt-1 pb-0.5" : isLast ? "pt-0.5 pb-1" : "pt-0.5 pb-0.5"
} ${
isDisabled
? "text-gray-500 cursor-not-allowed"
: "text-gray-300 hover:bg-gray-700 cursor-pointer"
} transition-colors text-left`}
>
<div className="flex items-center gap-2 w-full">
<i className={makeIconClass(config["display:icon"], false)}></i>
<span className={`text-sm ${isSelected ? "font-bold" : ""}`}>
{config["display:name"]}
{isDisabled && " (premium)"}
</span>
{isSelected && <i className="fa fa-check ml-auto"></i>}
</div>
<div className="text-xs text-muted pl-5" style={{ whiteSpace: "pre-line" }}>
{config["display:description"]}
</div>
</button>
);
})}
</div>
</>
)}
</div>
);
});

AIModeDropdown.displayName = "AIModeDropdown";
20 changes: 10 additions & 10 deletions frontend/app/aipanel/aipanel-contextmenu.ts
@@ -41,45 +41,45 @@ export async function handleWaveAIContextMenu(e: React.MouseEvent, showCopy: boo

const rateLimitInfo = globalStore.get(atoms.waveAIRateLimitInfoAtom);
const hasPremium = !rateLimitInfo || rateLimitInfo.unknown || rateLimitInfo.preq > 0;
const currentThinkingMode = rtInfo?.["waveai:thinkingmode"] ?? (hasPremium ? "balanced" : "quick");
const currentAIMode = rtInfo?.["waveai:mode"] ?? (hasPremium ? "waveai@balanced" : "waveai@quick");
const defaultTokens = model.inBuilder ? 24576 : 4096;
const currentMaxTokens = rtInfo?.["waveai:maxoutputtokens"] ?? defaultTokens;

const thinkingModeSubmenu: ContextMenuItem[] = [
const aiModeSubmenu: ContextMenuItem[] = [
{
label: "Quick (gpt-5-mini)",
type: "checkbox",
checked: currentThinkingMode === "quick",
checked: currentAIMode === "waveai@quick",
click: () => {
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
data: { "waveai:thinkingmode": "quick" },
data: { "waveai:mode": "waveai@quick" },
});
},
},
{
label: hasPremium ? "Balanced (gpt-5.1, low thinking)" : "Balanced (premium)",
type: "checkbox",
checked: currentThinkingMode === "balanced",
checked: currentAIMode === "waveai@balanced",
enabled: hasPremium,
click: () => {
if (!hasPremium) return;
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
data: { "waveai:thinkingmode": "balanced" },
data: { "waveai:mode": "waveai@balanced" },
});
},
},
{
label: hasPremium ? "Deep (gpt-5.1, full thinking)" : "Deep (premium)",
type: "checkbox",
checked: currentThinkingMode === "deep",
checked: currentAIMode === "waveai@deep",
enabled: hasPremium,
click: () => {
if (!hasPremium) return;
RpcApi.SetRTInfoCommand(TabRpcClient, {
oref: model.orefContext,
data: { "waveai:thinkingmode": "deep" },
data: { "waveai:mode": "waveai@deep" },
});
},
},
@@ -164,8 +164,8 @@ }
}

menu.push({
label: "Thinking Mode",
submenu: thinkingModeSubmenu,
label: "AI Mode",
submenu: aiModeSubmenu,
});

menu.push({